
enable and disable detection via mqtt

Blake Blackshear 4 years ago
parent
commit
39040c1874
3 changed files with 46 additions and 12 deletions
  1. frigate/app.py (+3 -2)
  2. frigate/mqtt.py (+25 -1)
  3. frigate/video.py (+18 -9)

+ 3 - 2
frigate/app.py

@@ -70,11 +70,12 @@ class FrigateApp():
                 'camera_fps': mp.Value('d', 0.0),
                 'skipped_fps': mp.Value('d', 0.0),
                 'process_fps': mp.Value('d', 0.0),
+                'detection_enabled': mp.Value('i', 1),
                 'detection_fps': mp.Value('d', 0.0),
                 'detection_frame': mp.Value('d', 0.0),
                 'read_start': mp.Value('d', 0.0),
                 'ffmpeg_pid': mp.Value('i', 0),
-                'frame_queue': mp.Queue(maxsize=2)
+                'frame_queue': mp.Queue(maxsize=2),
             }
         
     def check_config(self):
@@ -129,7 +130,7 @@ class FrigateApp():
         self.flask_app = create_app(self.config, self.db, self.stats_tracking, self.detected_frames_processor)
 
     def init_mqtt(self):
-        self.mqtt_client = create_mqtt_client(self.config)
+        self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)
 
     def start_detectors(self):
         model_shape = (self.config.model.height, self.config.model.width)

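The per-camera 'detection_enabled' entry is a plain multiprocessing.Value, so the MQTT callback in the main process and the camera's tracking subprocess read and write the same integer without any extra IPC; 'i' makes it a C int, and the initial 1 means detection starts enabled. A minimal stdlib-only sketch of that pattern (the names worker/flag are illustrative, not Frigate code):

import multiprocessing as mp
import time

def worker(flag):
    # The subprocess just polls flag.value each iteration, the way
    # process_frames checks detection_enabled.value per frame.
    for _ in range(4):
        print("detection on" if flag.value else "detection off")
        time.sleep(0.5)

if __name__ == "__main__":
    flag = mp.Value('i', 1)   # 'i' = C int, 1 = enabled by default
    p = mp.Process(target=worker, args=(flag,))
    p.start()
    time.sleep(1)
    flag.value = 0            # flipped from the parent, as the MQTT callback does
    p.join()
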
+ 25 - 1
frigate/mqtt.py

@@ -7,7 +7,7 @@ from frigate.config import FrigateConfig
 
 logger = logging.getLogger(__name__)
 
-def create_mqtt_client(config: FrigateConfig):
+def create_mqtt_client(config: FrigateConfig, camera_metrics):
     mqtt_config = config.mqtt
 
     def on_clips_command(client, userdata, message):
@@ -57,6 +57,28 @@ def create_mqtt_client(config: FrigateConfig):
         if command == "set":
             state_topic = f"{message.topic[:-4]}/state"
             client.publish(state_topic, payload, retain=True)
+    
+    def on_detect_command(client, userdata, message):
+        payload = message.payload.decode()
+        logger.debug(f"on_detect_toggle: {message.topic} {payload}")
+
+        camera_name = message.topic.split('/')[-3]
+        command = message.topic.split('/')[-1]
+
+        if payload == 'ON':
+            if not camera_metrics[camera_name]["detection_enabled"].value:
+                logger.info(f"Turning on detection for {camera_name} via mqtt")
+                camera_metrics[camera_name]["detection_enabled"].value = True
+        elif payload == 'OFF':
+            if camera_metrics[camera_name]["detection_enabled"].value:
+                logger.info(f"Turning off detection for {camera_name} via mqtt")
+                camera_metrics[camera_name]["detection_enabled"].value = False
+        else:
+            logger.warning(f"Received unsupported value at {message.topic}: {payload}")
+
+        if command == "set":
+            state_topic = f"{message.topic[:-4]}/state"
+            client.publish(state_topic, payload, retain=True)
 
     def on_connect(client, userdata, flags, rc):
         threading.current_thread().name = "mqtt"
@@ -81,6 +103,7 @@ def create_mqtt_client(config: FrigateConfig):
     for name in config.cameras.keys():
         client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/clips/#", on_clips_command)
         client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/snapshots/#", on_snapshots_command)
+        client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/detection/#", on_detect_command)
 
     if not mqtt_config.user is None:
         client.username_pw_set(mqtt_config.user, password=mqtt_config.password)
@@ -93,5 +116,6 @@ def create_mqtt_client(config: FrigateConfig):
     client.loop_start()
     client.subscribe(f"{mqtt_config.topic_prefix}/+/clips/#")
     client.subscribe(f"{mqtt_config.topic_prefix}/+/snapshots/#")
+    client.subscribe(f"{mqtt_config.topic_prefix}/+/detection/#")
 
     return client

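To exercise the new topics from outside Frigate, publish ON or OFF to <topic_prefix>/<camera>/detection/set; the on_detect_command callback above flips the shared flag and echoes the value, retained, to the matching .../detection/state topic. A hedged usage sketch (paho-mqtt is not part of this commit; it assumes the default topic prefix "frigate", a camera named "back_yard", and a broker on localhost:1883):

import paho.mqtt.publish as publish

# Disable detection for one camera...
publish.single("frigate/back_yard/detection/set", "OFF", hostname="localhost")
# ...and later re-enable it. Any other payload is logged as unsupported.
publish.single("frigate/back_yard/detection/set", "ON", hostname="localhost")
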
+ 18 - 9
frigate/video.py

@@ -255,6 +255,7 @@ def track_camera(name, config: CameraConfig, model_shape, detection_queue, resul
     listen()
 
     frame_queue = process_info['frame_queue']
+    detection_enabled = process_info['detection_enabled']
 
     frame_shape = config.frame_shape
     objects_to_track = config.objects.track
@@ -268,7 +269,7 @@ def track_camera(name, config: CameraConfig, model_shape, detection_queue, resul
     frame_manager = SharedMemoryFrameManager()
 
     process_frames(name, frame_queue, frame_shape, model_shape, frame_manager, motion_detector, object_detector,
-        object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, stop_event)
+        object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, detection_enabled, stop_event)
 
     logger.info(f"{name}: exiting subprocess")
 
@@ -305,7 +306,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
     frame_manager: FrameManager, motion_detector: MotionDetector, 
     object_detector: RemoteObjectDetector, object_tracker: ObjectTracker,
     detected_objects_queue: mp.Queue, process_info: Dict,
-    objects_to_track: List[str], object_filters, stop_event,
+    objects_to_track: List[str], object_filters, detection_enabled: mp.Value, stop_event,
     exit_on_empty: bool = False):
     
     fps = process_info['process_fps']
@@ -336,6 +337,14 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
             logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
             continue
 
+        if not detection_enabled.value:
+            fps.value = fps_tracker.eps()
+            object_tracker.match_and_update(frame_time, [])
+            detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, [], []))
+            detection_fps.value = object_detector.fps.eps()
+            frame_manager.close(f"{camera_name}{frame_time}")
+            continue
+
         # look for motion
         motion_boxes = motion_detector.detect(frame)
 
@@ -410,11 +419,11 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
 
         # add to the queue if not full
         if(detected_objects_queue.full()):
-          frame_manager.delete(f"{camera_name}{frame_time}")
-          continue
+            frame_manager.delete(f"{camera_name}{frame_time}")
+            continue
         else:
-          fps_tracker.update()
-          fps.value = fps_tracker.eps()
-          detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, motion_boxes, regions))
-          detection_fps.value = object_detector.fps.eps()
-          frame_manager.close(f"{camera_name}{frame_time}")
+            fps_tracker.update()
+            fps.value = fps_tracker.eps()
+            detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, motion_boxes, regions))
+            detection_fps.value = object_detector.fps.eps()
+            frame_manager.close(f"{camera_name}{frame_time}")
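
For reference, the new branch in process_frames keeps the rest of the pipeline fed even while detection is off: the frame is still consumed, the tracker is updated with an empty detection list, an empty result is queued for downstream consumers, and the shared-memory frame is released. The same lines as the diff above, restated here with explanatory comments (no behavior change intended):

if not detection_enabled.value:
    fps.value = fps_tracker.eps()                      # keep the per-camera process FPS metric current
    object_tracker.match_and_update(frame_time, [])    # no detections this frame; tracked objects update accordingly
    detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, [], []))  # empty motion boxes and regions
    detection_fps.value = object_detector.fps.eps()    # detector FPS is still reported
    frame_manager.close(f"{camera_name}{frame_time}")  # release the shared-memory frame
    continue                                           # skip motion and object detection entirely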