Browse source

add jpg snapshots to disk and clean up config

Blake Blackshear 4 years ago
parent
commit
9dc97d4b6b
3 changed files with 146 additions and 56 deletions
  1. 32 10
      README.md
  2. 71 21
      frigate/config.py
  3. 43 25
      frigate/object_processing.py

+ 32 - 10
README.md

@@ -34,6 +34,7 @@ Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but
 - [Masks](#masks)
 - [Zones](#zones)
 - [Recording Clips (save_clips)](#recording-clips)
+- [Snapshots (snapshots)](#snapshots)
 - [24/7 Recordings (record)](#247-recordings)
 - [RTMP Streams (rtmp)](#rtmp-streams)
 - [Integration with HomeAssistant](#integration-with-homeassistant)
@@ -428,20 +429,34 @@ cameras:
       # Required: Enable the live stream (default: True)
       enabled: True
 
-    # Optional: Configuration for the snapshots in the debug view and mqtt
+    # Optional: Configuration for the jpg snapshots written to the clips directory for each event
     snapshots:
+      # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below)
+      enabled: False
       # Optional: print a timestamp on the snapshots (default: shown below)
-      show_timestamp: True
-      # Optional: draw zones on the debug mjpeg feed (default: shown below)
-      draw_zones: False
-      # Optional: draw bounding boxes on the mqtt snapshots (default: shown below)
-      draw_bounding_boxes: True
-      # Optional: crop the snapshot to the detection region (default: shown below)
-      crop_to_region: True
-      # Optional: height to resize the snapshot to (default: shown below)
-      # NOTE: 175px is optimized for thumbnails in the homeassistant media browser
+      timestamp: False
+      # Optional: draw bounding box on the snapshots (default: shown below)
+      bounding_box: False
+      # Optional: crop the snapshot (default: shown below)
+      crop: False
+      # Optional: height to resize the snapshot to (default: original size)
       height: 175
 
+    # Optional: Configuration for the jpg snapshots published via MQTT
+    mqtt:
+      # Optional: Enable publishing snapshot via mqtt for camera (default: shown below)
+      # NOTE: Only applies to publishing image data to MQTT via 'frigate/<camera_name>/<object_name>/snapshot'. 
+      # All other messages will still be published.
+      enabled: True
+      # Optional: print a timestamp on the snapshots (default: shown below)
+      timestamp: True
+      # Optional: draw bounding box on the snapshots (default: shown below)
+      bounding_box: True
+      # Optional: crop the snapshot (default: shown below)
+      crop: True
+      # Optional: height to resize the snapshot to (default: shown below)
+      height: 270
+
     # Optional: Camera level object filters config. If defined, this is used instead of the global config.
     objects:
       track:
@@ -680,6 +695,10 @@ If you are storing your clips on a network share (SMB, NFS, etc), you may get a
 - `post_capture`: Defines how much time should be included in the clip after the end of the event. Defaults to 5 seconds.
 - `objects`: List of object types to save clips for. Object types here must be listed for tracking at the camera or global configuration. Defaults to all tracked objects.
 
+[Back to top](#documentation)
+
+## Snapshots
+Frigate can save a snapshot image to `/media/frigate/clips` for each event named as `<camera>-<id>.jpg`.
 
 [Back to top](#documentation)
 
@@ -874,6 +893,9 @@ Returns a snapshot for the event id optimized for notifications. Works while the
 ### `/clips/<camera>-<id>.mp4`
 Video clip for the given camera and event id.
 
+### `/clips/<camera>-<id>.jpg`
+JPG snapshot for the given camera and event id.
+
 [Back to top](#documentation)
 
 ## MQTT Topics

+ 71 - 21
frigate/config.py

@@ -192,11 +192,18 @@ CAMERAS_SCHEMA = vol.Schema(vol.All(
                 vol.Required('enabled', default=True): bool,
             },
             vol.Optional('snapshots', default={}): {
-                vol.Optional('show_timestamp', default=True): bool,
-                vol.Optional('draw_zones', default=False): bool,
-                vol.Optional('draw_bounding_boxes', default=True): bool,
-                vol.Optional('crop_to_region', default=True): bool,
-                vol.Optional('height', default=175): int
+                vol.Optional('enabled', default=False): bool,
+                vol.Optional('timestamp', default=False): bool,
+                vol.Optional('bounding_box', default=False): bool,
+                vol.Optional('crop', default=False): bool,
+                'height': int
+            },
+            vol.Optional('mqtt', default={}): {
+                vol.Optional('enabled', default=True): bool,
+                vol.Optional('timestamp', default=True): bool,
+                vol.Optional('bounding_box', default=True): bool,
+                vol.Optional('crop', default=True): bool,
+                vol.Optional('height', default=270): int
             },
             'objects': OBJECTS_SCHEMA,
             vol.Optional('motion', default={}): MOTION_SCHEMA,
@@ -510,27 +517,27 @@ class ObjectConfig():
 
 class CameraSnapshotsConfig():
     def __init__(self, config):
-        self._show_timestamp = config['show_timestamp']
-        self._draw_zones = config['draw_zones']
-        self._draw_bounding_boxes = config['draw_bounding_boxes']
-        self._crop_to_region = config['crop_to_region']
+        self._enabled = config['enabled']
+        self._timestamp = config['timestamp']
+        self._bounding_box = config['bounding_box']
+        self._crop = config['crop']
         self._height = config.get('height')
     
     @property
-    def show_timestamp(self):
-        return self._show_timestamp
+    def enabled(self):
+        return self._enabled
     
     @property
-    def draw_zones(self):
-        return self._draw_zones
+    def timestamp(self):
+        return self._timestamp
 
     @property
-    def draw_bounding_boxes(self):
-        return self._draw_bounding_boxes
+    def bounding_box(self):
+        return self._bounding_box
 
     @property
-    def crop_to_region(self):
-        return self._crop_to_region
+    def crop(self):
+        return self._crop
 
     @property
     def height(self):
@@ -538,10 +545,47 @@ class CameraSnapshotsConfig():
     
     def to_dict(self):
         return {
-            'show_timestamp': self.show_timestamp,
-            'draw_zones': self.draw_zones,
-            'draw_bounding_boxes': self.draw_bounding_boxes,
-            'crop_to_region': self.crop_to_region,
+            'enabled': self.enabled,
+            'timestamp': self.timestamp,
+            'bounding_box': self.bounding_box,
+            'crop': self.crop,
+            'height': self.height
+        }
+
+class CameraMqttConfig():
+    def __init__(self, config):
+        self._enabled = config['enabled']
+        self._timestamp = config['timestamp']
+        self._bounding_box = config['bounding_box']
+        self._crop = config['crop']
+        self._height = config.get('height')
+    
+    @property
+    def enabled(self):
+        return self._enabled
+    
+    @property
+    def timestamp(self):
+        return self._timestamp
+
+    @property
+    def bounding_box(self):
+        return self._bounding_box
+
+    @property
+    def crop(self):
+        return self._crop
+
+    @property
+    def height(self):
+        return self._height
+    
+    def to_dict(self):
+        return {
+            'enabled': self.enabled,
+            'timestamp': self.timestamp,
+            'bounding_box': self.bounding_box,
+            'crop': self.crop,
             'height': self.height
         }
 
@@ -708,6 +752,7 @@ class CameraConfig():
         self._record = RecordConfig(global_config['record'], config['record'])
         self._rtmp = CameraRtmpConfig(global_config, config['rtmp'])
         self._snapshots = CameraSnapshotsConfig(config['snapshots'])
+        self._mqtt = CameraMqttConfig(config['mqtt'])
         self._objects = ObjectConfig(global_config['objects'], config.get('objects', {}))
         self._motion = MotionConfig(global_config['motion'], config['motion'], self._height)
         self._detect = DetectConfig(global_config['detect'], config['detect'], config.get('fps', 5))
@@ -842,6 +887,10 @@ class CameraConfig():
     def snapshots(self):
         return self._snapshots
     
+    @property
+    def mqtt(self):
+        return self._mqtt
+    
     @property
     def objects(self):
         return self._objects
@@ -878,6 +927,7 @@ class CameraConfig():
             'record': self.record.to_dict(),
             'rtmp': self.rtmp.to_dict(),
             'snapshots': self.snapshots.to_dict(),
+            'mqtt': self.mqtt.to_dict(),
             'objects': self.objects.to_dict(),
             'motion': self.motion.to_dict(),
             'detect': self.detect.to_dict(),

+ 43 - 25
frigate/object_processing.py

@@ -74,9 +74,6 @@ class TrackedObject():
         self.thumbnail_data = None
         self.frame = None
         self.previous = self.to_dict()
-        self._snapshot_jpg_time = 0
-        ret, jpg = cv2.imencode('.jpg', np.zeros((300,300,3), np.uint8))
-        self._snapshot_jpg = jpg.tobytes()
 
         # start the score history
         self.score_history = [self.obj_data['score']]
@@ -167,41 +164,43 @@ class TrackedObject():
             'region': self.obj_data['region'],
             'current_zones': self.current_zones.copy(),
             'entered_zones': list(self.entered_zones).copy(),
-            'thumbnail': base64.b64encode(self.get_jpg_bytes()).decode('utf-8') if include_thumbnail else None
+            'thumbnail': base64.b64encode(self.get_thumbnail()).decode('utf-8') if include_thumbnail else None
         }
-
-    def get_jpg_bytes(self):
-        if self.thumbnail_data is None or self._snapshot_jpg_time == self.thumbnail_data['frame_time']:
-            return self._snapshot_jpg
-
         if not self.thumbnail_data['frame_time'] in self.frame_cache:
             logger.error(f"Unable to create thumbnail for {self.obj_data['id']}")
             logger.error(f"Looking for frame_time of {self.thumbnail_data['frame_time']}")
             logger.error(f"Thumbnail frames: {','.join([str(k) for k in self.frame_cache.keys()])}")
-            return self._snapshot_jpg
+            ret, jpg = cv2.imencode('.jpg', np.zeros((175,175,3), np.uint8))
 
-        # TODO: crop first to avoid converting the entire frame?
-        snapshot_config = self.camera_config.snapshots
-        best_frame = cv2.cvtColor(self.frame_cache[self.thumbnail_data['frame_time']], cv2.COLOR_YUV2BGR_I420)
+        jpg_bytes = self.get_jpg_bytes(timestamp=False, bounding_box=False, crop=True, height=175)
 
-        if snapshot_config.draw_bounding_boxes:
+        if jpg_bytes:
+            return jpg_bytes
+        else:
+            ret, jpg = cv2.imencode('.jpg', np.zeros((175,175,3), np.uint8))
+            return jpg.tobytes()
+    
+    def get_jpg_bytes(self, timestamp=False, bounding_box=False, crop=False, height=None):
+        best_frame = cv2.cvtColor(self.frame_cache[self.thumbnail_data['frame_time']], cv2.COLOR_YUV2BGR_I420)
+ 
+        if bounding_box:
             thickness = 2
             color = COLOR_MAP[self.obj_data['label']]
+
+            # draw the bounding boxes on the frame
             box = self.thumbnail_data['box']
-            draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], self.obj_data['label'],
-                f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}", thickness=thickness, color=color)
+            draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], self.obj_data['label'], f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}", thickness=thickness, color=color)
 
-        if snapshot_config.crop_to_region:
+        if crop:
             box = self.thumbnail_data['box']
             region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
             best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
 
-        if snapshot_config.height:
-            height = snapshot_config.height
+        if height:
             width = int(height*best_frame.shape[1]/best_frame.shape[0])
             best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
 
-        if snapshot_config.show_timestamp:
+        if timestamp:
             time_to_show = datetime.datetime.fromtimestamp(self.thumbnail_data['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
             size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
             text_width = size[0][0]
@@ -212,9 +211,9 @@ class TrackedObject():
 
         ret, jpg = cv2.imencode('.jpg', best_frame)
         if ret:
-            self._snapshot_jpg = jpg.tobytes()
-
-        return self._snapshot_jpg
+            return jpg.tobytes()
+        else:
+            return None
 
 def zone_filtered(obj: TrackedObject, object_config):
     object_name = obj.obj_data['label']
@@ -432,14 +431,33 @@ class TrackedObjectProcessor(threading.Thread):
             obj.previous = after
 
         def end(camera, obj: TrackedObject, current_frame_time):
+            snapshot_config = self.config.cameras[camera].snapshots
             if not obj.false_positive:
                 message = { 'before': obj.previous, 'after': obj.to_dict() }
                 self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
+                # write snapshot to disk if enabled
+                if snapshot_config.enabled:
+                    jpg_bytes = obj.get_jpg_bytes(
+                        timestamp=snapshot_config.timestamp,
+                        bounding_box=snapshot_config.bounding_box,
+                        crop=snapshot_config.crop,
+                        height=snapshot_config.height
+                    )
+                    with open(os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"), 'wb') as j:
+                        j.write(jpg_bytes)
             self.event_queue.put(('end', camera, obj.to_dict(include_thumbnail=True)))
 
         def snapshot(camera, obj: TrackedObject, current_frame_time):
-            self.client.publish(f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot", obj.get_jpg_bytes(), retain=True)
-
+            mqtt_config = self.config.cameras[camera].mqtt
+            if mqtt_config.enabled:
+                jpg_bytes = obj.get_jpg_bytes(
+                    timestamp=mqtt_config.timestamp,
+                    bounding_box=mqtt_config.bounding_box,
+                    crop=mqtt_config.crop,
+                    height=mqtt_config.height
+                )
+                self.client.publish(f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot", jpg_bytes, retain=True)
+        
         def object_status(camera, object_name, status):
             self.client.publish(f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False)