detect_objects.py

import faulthandler; faulthandler.enable()
import os
import signal
import sys
import traceback
import cv2
import time
import datetime
import queue
import yaml
import json
import threading
import multiprocessing as mp
import subprocess as sp
import numpy as np
import logging
from flask import Flask, Response, make_response, jsonify, request
import paho.mqtt.client as mqtt

from peewee import *
from playhouse.shortcuts import model_to_dict
from playhouse.sqlite_ext import *
from playhouse.flask_utils import FlaskDB

from frigate.video import capture_camera, track_camera, get_ffmpeg_input, get_frame_shape, CameraCapture, start_or_restart_ffmpeg
from frigate.object_processing import TrackedObjectProcessor
from frigate.events import EventProcessor
from frigate.util import EventsPerSecond
from frigate.edgetpu import EdgeTPUProcess

FRIGATE_VARS = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}

CONFIG_FILE = os.environ.get('CONFIG_FILE', '/config/config.yml')

if CONFIG_FILE.endswith(".yml"):
    with open(CONFIG_FILE) as f:
        CONFIG = yaml.safe_load(f)
elif CONFIG_FILE.endswith(".json"):
    with open(CONFIG_FILE) as f:
        CONFIG = json.load(f)

CACHE_DIR = CONFIG.get('save_clips', {}).get('cache_dir', '/cache')
CLIPS_DIR = CONFIG.get('save_clips', {}).get('clips_dir', '/clips')

if not os.path.exists(CACHE_DIR) and not os.path.islink(CACHE_DIR):
    os.makedirs(CACHE_DIR)
if not os.path.exists(CLIPS_DIR) and not os.path.islink(CLIPS_DIR):
    os.makedirs(CLIPS_DIR)

DATABASE = f"sqliteext:///{os.path.join(CLIPS_DIR, 'frigate.db')}"

MQTT_HOST = CONFIG['mqtt']['host']
MQTT_PORT = CONFIG.get('mqtt', {}).get('port', 1883)
MQTT_TOPIC_PREFIX = CONFIG.get('mqtt', {}).get('topic_prefix', 'frigate')
MQTT_USER = CONFIG.get('mqtt', {}).get('user')
MQTT_PASS = CONFIG.get('mqtt', {}).get('password')
if not MQTT_PASS is None:
    MQTT_PASS = MQTT_PASS.format(**FRIGATE_VARS)
MQTT_CLIENT_ID = CONFIG.get('mqtt', {}).get('client_id', 'frigate')

# Set the default FFmpeg config
FFMPEG_CONFIG = CONFIG.get('ffmpeg', {})
FFMPEG_DEFAULT_CONFIG = {
    'global_args': FFMPEG_CONFIG.get('global_args',
        ['-hide_banner', '-loglevel', 'panic']),
    'hwaccel_args': FFMPEG_CONFIG.get('hwaccel_args',
        []),
    'input_args': FFMPEG_CONFIG.get('input_args',
        ['-avoid_negative_ts', 'make_zero',
         '-fflags', 'nobuffer',
         '-flags', 'low_delay',
         '-strict', 'experimental',
         '-fflags', '+genpts+discardcorrupt',
         '-rtsp_transport', 'tcp',
         '-stimeout', '5000000',
         '-use_wallclock_as_timestamps', '1']),
    'output_args': FFMPEG_CONFIG.get('output_args',
        ['-f', 'rawvideo',
         '-pix_fmt', 'yuv420p'])
}

GLOBAL_OBJECT_CONFIG = CONFIG.get('objects', {})

WEB_PORT = CONFIG.get('web_port', 5000)
DETECTORS = CONFIG.get('detectors', {'coral': {'type': 'edgetpu', 'device': 'usb'}})

# create a flask app
app = Flask(__name__)
app.config.from_object(__name__)
flask_db = FlaskDB(app)
db = flask_db.database

log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

peewee_log = logging.getLogger('peewee')
peewee_log.addHandler(logging.StreamHandler())
peewee_log.setLevel(logging.DEBUG)
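
# Peewee model for detected events, persisted to the SQLite database in CLIPS_DIR
# and served by the /events endpoint defined in main() below.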
class Event(flask_db.Model):
    id = CharField(null=False, primary_key=True, max_length=30)
    label = CharField(index=True, max_length=20)
    camera = CharField(index=True, max_length=20)
    start_time = DateTimeField()
    end_time = DateTimeField()
    top_score = FloatField()
    false_positive = BooleanField()
    zones = JSONField()

def init_db():
    db.create_tables([Event], safe=True)
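
# Watchdog thread: periodically checks the detector and camera tracking processes
# and restarts any that appear stuck or have exited.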
class FrigateWatchdog(threading.Thread):
    def __init__(self, camera_processes, config, detectors, detection_queue, out_events, tracked_objects_queue, stop_event):
        threading.Thread.__init__(self)
        self.camera_processes = camera_processes
        self.config = config
        self.detectors = detectors
        self.detection_queue = detection_queue
        self.out_events = out_events
        self.tracked_objects_queue = tracked_objects_queue
        self.stop_event = stop_event

    def run(self):
        time.sleep(10)
        while True:
            # wait a bit before checking
            time.sleep(10)

            if self.stop_event.is_set():
                print("Exiting watchdog...")
                break

            now = datetime.datetime.now().timestamp()

            # check the detection processes
            for detector in self.detectors.values():
                detection_start = detector.detection_start.value
                if (detection_start > 0.0 and
                        now - detection_start > 10):
                    print("Detection appears to be stuck. Restarting detection process")
                    detector.start_or_restart()
                elif not detector.detect_process.is_alive():
                    print("Detection appears to have stopped. Restarting detection process")
                    detector.start_or_restart()

            # check the camera processes
            for name, camera_process in self.camera_processes.items():
                process = camera_process['process']
                if not process.is_alive():
                    print(f"Track process for {name} is not alive. Starting again...")
                    camera_process['camera_fps'].value = 0.0
                    camera_process['process_fps'].value = 0.0
                    camera_process['detection_fps'].value = 0.0
                    camera_process['read_start'].value = 0.0
                    # restart track_camera with this camera's config, matching the call in main()
                    process = mp.Process(target=track_camera, args=(name, self.config[name],
                        self.detection_queue, self.out_events[name], self.tracked_objects_queue, camera_process, self.stop_event))
                    process.daemon = True
                    camera_process['process'] = process
                    process.start()
                    print(f"Track process started for {name}: {process.pid}")
def main():
    stop_event = threading.Event()

    # connect to mqtt and setup last will
    def on_connect(client, userdata, flags, rc):
        print("On connect called")
        if rc != 0:
            if rc == 3:
                print("MQTT Server unavailable")
            elif rc == 4:
                print("MQTT Bad username or password")
            elif rc == 5:
                print("MQTT Not authorized")
            else:
                print("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
        # publish a message to signal that the service is running
        client.publish(MQTT_TOPIC_PREFIX+'/available', 'online', retain=True)

    client = mqtt.Client(client_id=MQTT_CLIENT_ID)
    client.on_connect = on_connect
    client.will_set(MQTT_TOPIC_PREFIX+'/available', payload='offline', qos=1, retain=True)
    if not MQTT_USER is None:
        client.username_pw_set(MQTT_USER, password=MQTT_PASS)
    client.connect(MQTT_HOST, MQTT_PORT, 60)
    client.loop_start()

    ##
    # Setup config defaults for cameras
    ##
    for name, config in CONFIG['cameras'].items():
        config['snapshots'] = {
            'show_timestamp': config.get('snapshots', {}).get('show_timestamp', True),
            'draw_zones': config.get('snapshots', {}).get('draw_zones', False),
            'draw_bounding_boxes': config.get('snapshots', {}).get('draw_bounding_boxes', True)
        }
        config['zones'] = config.get('zones', {})

    # Queue for cameras to push tracked objects to
    tracked_objects_queue = mp.Queue(maxsize=len(CONFIG['cameras'].keys())*2)

    # Queue for clip processing
    event_queue = mp.Queue()

    # create the detection pipes and shms
    out_events = {}
    camera_shms = []
    for name in CONFIG['cameras'].keys():
        out_events[name] = mp.Event()
        shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=300*300*3)
        shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20*6*4)
        camera_shms.append(shm_in)
        camera_shms.append(shm_out)

    detection_queue = mp.Queue()
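
    # start one detector process per configured detector: type 'cpu' runs the model
    # on the CPU via tf_device='cpu', 'edgetpu' uses the configured device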
    detectors = {}
    for name, detector in DETECTORS.items():
        if detector['type'] == 'cpu':
            detectors[name] = EdgeTPUProcess(detection_queue, out_events=out_events, tf_device='cpu')
        if detector['type'] == 'edgetpu':
            detectors[name] = EdgeTPUProcess(detection_queue, out_events=out_events, tf_device=detector['device'])

    # create the camera processes
    camera_process_info = {}
    for name, config in CONFIG['cameras'].items():
        # Merge the ffmpeg config with the global config
        ffmpeg = config.get('ffmpeg', {})
        ffmpeg_input = get_ffmpeg_input(ffmpeg['input'])
        ffmpeg_global_args = ffmpeg.get('global_args', FFMPEG_DEFAULT_CONFIG['global_args'])
        ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args', FFMPEG_DEFAULT_CONFIG['hwaccel_args'])
        ffmpeg_input_args = ffmpeg.get('input_args', FFMPEG_DEFAULT_CONFIG['input_args'])
        ffmpeg_output_args = ffmpeg.get('output_args', FFMPEG_DEFAULT_CONFIG['output_args'])
        if not config.get('fps') is None:
            ffmpeg_output_args = ["-r", str(config.get('fps'))] + ffmpeg_output_args
        if config.get('save_clips', {}).get('enabled', False):
            ffmpeg_output_args = [
                "-f",
                "segment",
                "-segment_time",
                "10",
                "-segment_format",
                "mp4",
                "-reset_timestamps",
                "1",
                "-strftime",
                "1",
                "-c",
                "copy",
                "-an",
                "-map",
                "0",
                f"{os.path.join(CACHE_DIR, name)}-%Y%m%d%H%M%S.mp4"
            ] + ffmpeg_output_args
        ffmpeg_cmd = (['ffmpeg'] +
                      ffmpeg_global_args +
                      ffmpeg_hwaccel_args +
                      ffmpeg_input_args +
                      ['-i', ffmpeg_input] +
                      ffmpeg_output_args +
                      ['pipe:'])

        config['ffmpeg_cmd'] = ffmpeg_cmd

        if 'width' in config and 'height' in config:
            frame_shape = (config['height'], config['width'], 3)
        else:
            frame_shape = get_frame_shape(ffmpeg_input)

        config['frame_shape'] = frame_shape
        config['take_frame'] = config.get('take_frame', 1)

        camera_process_info[name] = {
            'camera_fps': mp.Value('d', 0.0),
            'skipped_fps': mp.Value('d', 0.0),
            'process_fps': mp.Value('d', 0.0),
            'detection_fps': mp.Value('d', 0.0),
            'detection_frame': mp.Value('d', 0.0),
            'read_start': mp.Value('d', 0.0),
            'ffmpeg_pid': mp.Value('i', 0),
            'frame_queue': mp.Queue(maxsize=2)
        }

        # merge global object config into camera object config
        camera_objects_config = config.get('objects', {})
        # get objects to track for camera
        objects_to_track = camera_objects_config.get('track', GLOBAL_OBJECT_CONFIG.get('track', ['person']))
        # get object filters
        object_filters = camera_objects_config.get('filters', GLOBAL_OBJECT_CONFIG.get('filters', {}))
        config['objects'] = {
            'track': objects_to_track,
            'filters': object_filters
        }

        capture_process = mp.Process(target=capture_camera, args=(name, config,
            camera_process_info[name], stop_event))
        capture_process.daemon = True
        camera_process_info[name]['capture_process'] = capture_process

        camera_process = mp.Process(target=track_camera, args=(name, config,
            detection_queue, out_events[name], tracked_objects_queue, camera_process_info[name], stop_event))
        camera_process.daemon = True
        camera_process_info[name]['process'] = camera_process

    # start the camera_processes
    for name, camera_process in camera_process_info.items():
        camera_process['capture_process'].start()
        print(f"Camera capture process started for {name}: {camera_process['capture_process'].pid}")
        camera_process['process'].start()
        print(f"Camera process started for {name}: {camera_process['process'].pid}")

    event_processor = EventProcessor(CONFIG, camera_process_info, CACHE_DIR, CLIPS_DIR, event_queue, stop_event, Event)
    event_processor.start()

    object_processor = TrackedObjectProcessor(CONFIG['cameras'], client, MQTT_TOPIC_PREFIX, tracked_objects_queue, event_queue, stop_event)
    object_processor.start()

    frigate_watchdog = FrigateWatchdog(camera_process_info, CONFIG['cameras'], detectors, detection_queue, out_events, tracked_objects_queue, stop_event)
    frigate_watchdog.start()

    def receiveSignal(signalNumber, frame):
        print('Received:', signalNumber)
        stop_event.set()
        event_processor.join()
        object_processor.join()
        frigate_watchdog.join()
        for detector in detectors.values():
            detector.stop()
        for shm in camera_shms:
            shm.close()
            shm.unlink()
        sys.exit()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)
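
    # Flask routes: health check, debug stack traces, runtime stats, stored events,
    # best-object snapshots, MJPEG feeds, and latest-frame snapshots per camera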
    @app.route('/')
    def ishealthy():
        # return a health check response
        return "Frigate is running. Alive and healthy!"

    @app.route('/debug/stack')
    def processor_stack():
        frame = sys._current_frames().get(object_processor.ident, None)
        if frame:
            return "<br>".join(traceback.format_stack(frame)), 200
        else:
            return "no frame found", 200

    @app.route('/debug/print_stack')
    def print_stack():
        pid = int(request.args.get('pid', 0))
        if pid == 0:
            return "missing pid", 200
        else:
            os.kill(pid, signal.SIGUSR1)
            return "check logs", 200

    @app.route('/events')
    def events():
        events = Event.select()
        return jsonify([model_to_dict(e) for e in events])
    @app.route('/debug/stats')
    def stats():
        stats = {}
        total_detection_fps = 0

        for name, camera_stats in camera_process_info.items():
            total_detection_fps += camera_stats['detection_fps'].value
            stats[name] = {
                'camera_fps': round(camera_stats['camera_fps'].value, 2),
                'process_fps': round(camera_stats['process_fps'].value, 2),
                'skipped_fps': round(camera_stats['skipped_fps'].value, 2),
                'detection_fps': round(camera_stats['detection_fps'].value, 2),
                'pid': camera_stats['process'].pid,
                'capture_pid': camera_stats['capture_process'].pid,
                'frame_info': {
                    'detect': camera_stats['detection_frame'].value,
                    'process': object_processor.camera_data[name]['current_frame_time']
                }
            }

        stats['detectors'] = {}
        for name, detector in detectors.items():
            stats['detectors'][name] = {
                'inference_speed': round(detector.avg_inference_speed.value*1000, 2),
                'detection_start': detector.detection_start.value,
                'pid': detector.detect_process.pid
            }
        stats['detection_fps'] = round(total_detection_fps, 2)

        return jsonify(stats)
    @app.route('/<camera_name>/<label>/best.jpg')
    def best(camera_name, label):
        if camera_name in CONFIG['cameras']:
            best_object = object_processor.get_best(camera_name, label)
            best_frame = best_object.get('frame')
            if best_frame is None:
                best_frame = np.zeros((720, 1280, 3), np.uint8)
            else:
                best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)

            crop = bool(request.args.get('crop', 0, type=int))
            if crop:
                region = best_object.get('region', [0, 0, 300, 300])
                best_frame = best_frame[region[1]:region[3], region[0]:region[2]]

            height = int(request.args.get('h', str(best_frame.shape[0])))
            width = int(height*best_frame.shape[1]/best_frame.shape[0])

            best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
            ret, jpg = cv2.imencode('.jpg', best_frame)
            response = make_response(jpg.tobytes())
            response.headers['Content-Type'] = 'image/jpg'
            return response
        else:
            return "Camera named {} not found".format(camera_name), 404
    @app.route('/<camera_name>')
    def mjpeg_feed(camera_name):
        fps = int(request.args.get('fps', '3'))
        height = int(request.args.get('h', '360'))
        if camera_name in CONFIG['cameras']:
            # return a multipart response
            return Response(imagestream(camera_name, fps, height),
                            mimetype='multipart/x-mixed-replace; boundary=frame')
        else:
            return "Camera named {} not found".format(camera_name), 404

    @app.route('/<camera_name>/latest.jpg')
    def latest_frame(camera_name):
        if camera_name in CONFIG['cameras']:
            frame = object_processor.get_current_frame(camera_name)
            if frame is None:
                frame = np.zeros((720, 1280, 3), np.uint8)

            height = int(request.args.get('h', str(frame.shape[0])))
            width = int(height*frame.shape[1]/frame.shape[0])

            frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

            ret, jpg = cv2.imencode('.jpg', frame)
            response = make_response(jpg.tobytes())
            response.headers['Content-Type'] = 'image/jpg'
            return response
        else:
            return "Camera named {} not found".format(camera_name), 404
    def imagestream(camera_name, fps, height):
        while True:
            # max out at specified FPS
            time.sleep(1/fps)
            frame = object_processor.get_current_frame(camera_name, draw=True)
            if frame is None:
                frame = np.zeros((height, int(height*16/9), 3), np.uint8)

            width = int(height*frame.shape[1]/frame.shape[0])
            frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)

            ret, jpg = cv2.imencode('.jpg', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')

    app.run(host='0.0.0.0', port=WEB_PORT, debug=False)

    object_processor.join()

if __name__ == '__main__':
    init_db()
    main()