Browse Source

allow runtime drawing settings for mjpeg and latest

Blake Blackshear 4 years ago
parent
commit
899d41f361
5 changed files with 80 additions and 26 deletions
  1. README.md (+24 -1)
  2. frigate/http.py (+20 -4)
  3. frigate/motion.py (+1 -1)
  4. frigate/object_processing.py (+32 -19)
  5. frigate/video.py (+3 -1)

+ 24 - 1
README.md

@@ -719,7 +719,19 @@ A web server is available on port 5000 with the following endpoints.
 ### `/<camera_name>`
 An mjpeg stream for debugging. Keep in mind the mjpeg endpoint is for debugging only and will put additional load on the system when in use.
 
-You can access a higher resolution mjpeg stream by appending `h=height-in-pixels` to the endpoint. For example `http://localhost:5000/back?h=1080`. You can also increase the FPS by appending `fps=frame-rate` to the URL such as `http://localhost:5000/back?fps=10` or both with `?fps=10&h=1000`
+Accepts the following query string parameters:
+|Param|Type|Description|
+|---|---|---|
+|`fps`|int|Frame rate|
+|`h`|int|Height in pixels|
+|`bbox`|int|Show bounding boxes for detected objects (0 or 1)|
+|`timestamp`|int|Print the timestamp in the upper left (0 or 1)|
+|`zones`|int|Draw the zones on the image (0 or 1)|
+|`mask`|int|Overlay the mask on the image (0 or 1)|
+|`motion`|int|Draw blue boxes for areas with detected motion (0 or 1)|
+|`regions`|int|Draw green boxes for areas where object detection was run (0 or 1)|
+
+You can access a higher resolution mjpeg stream by appending `h=height-in-pixels` to the endpoint. For example `http://localhost:5000/back?h=1080`. You can also increase the FPS by appending `fps=frame-rate` to the URL such as `http://localhost:5000/back?fps=10` or both with `?fps=10&h=1000`.
 
 ### `/<camera_name>/<object_name>/best.jpg[?h=300&crop=1]`
 The best snapshot for any object type. It is a full resolution image by default.
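
For the mjpeg endpoint above, the new drawing toggles combine freely with the existing `fps` and `h` parameters. A few illustrative URLs (the camera name `back` is just an example):

```
# debug stream at 5 fps, 720px tall, with bounding boxes and motion boxes
http://localhost:5000/back?fps=5&h=720&bbox=1&motion=1

# default stream with only the timestamp overlay enabled
http://localhost:5000/back?timestamp=1
```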
@@ -731,6 +743,17 @@ Example parameters:
 ### `/<camera_name>/latest.jpg[?h=300]`
 The most recent frame that frigate has finished processing. It is a full resolution image by default.
 
+Accepts the following query string parameters:
+|Param|Type|Description|
+|---|---|---|
+|`h`|int|Height in pixels|
+|`bbox`|int|Show bounding boxes for detected objects (0 or 1)|
+|`timestamp`|int|Print the timestamp in the upper left (0 or 1)|
+|`zones`|int|Draw the zones on the image (0 or 1)|
+|`mask`|int|Overlay the mask on the image (0 or 1)|
+|`motion`|int|Draw blue boxes for areas with detected motion (0 or 1)|
+|`regions`|int|Draw green boxes for areas where object detection was run (0 or 1)|
+
 Example parameters:
 - `h=300`: resizes the image to 300 pixels tall
 
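To exercise the new `latest.jpg` options end to end, here is a minimal sketch using the `requests` library (host, port, and the camera name `back` are placeholders for your setup):

```python
import requests

# Ask for a 480px-tall frame with bounding boxes and a timestamp drawn in.
params = {"h": 480, "bbox": 1, "timestamp": 1}
resp = requests.get("http://localhost:5000/back/latest.jpg", params=params)
resp.raise_for_status()

with open("latest.jpg", "wb") as f:
    f.write(resp.content)  # a JPEG with the requested overlays
```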

+ 20 - 4
frigate/http.py

@@ -208,18 +208,34 @@ def best(camera_name, label):
 def mjpeg_feed(camera_name):
     fps = int(request.args.get('fps', '3'))
     height = int(request.args.get('h', '360'))
+    draw_options = {
+        'bounding_boxes': request.args.get('bbox', type=int),
+        'timestamp': request.args.get('timestamp', type=int),
+        'zones': request.args.get('zones', type=int),
+        'mask': request.args.get('mask', type=int),
+        'motion_boxes': request.args.get('motion', type=int),
+        'regions': request.args.get('regions', type=int),
+    }
     if camera_name in current_app.frigate_config.cameras:
         # return a multipart response
-        return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height),
+        return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height, draw_options),
                         mimetype='multipart/x-mixed-replace; boundary=frame')
     else:
         return "Camera named {} not found".format(camera_name), 404
 
 @bp.route('/<camera_name>/latest.jpg')
 def latest_frame(camera_name):
+    draw_options = {
+        'bounding_boxes': request.args.get('bbox', type=int),
+        'timestamp': request.args.get('timestamp', type=int),
+        'zones': request.args.get('zones', type=int),
+        'mask': request.args.get('mask', type=int),
+        'motion_boxes': request.args.get('motion', type=int),
+        'regions': request.args.get('regions', type=int),
+    }
     if camera_name in current_app.frigate_config.cameras:
         # max out at specified FPS
-        frame = current_app.detected_frames_processor.get_current_frame(camera_name)
+        frame = current_app.detected_frames_processor.get_current_frame(camera_name, draw_options)
         if frame is None:
             frame = np.zeros((720,1280,3), np.uint8)
 
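Worth noting: `request.args.get(..., type=int)` returns `None` when the parameter is absent, so every overlay defaults to off, and the downstream `draw_options.get(...)` checks are only truthy when the client passes `1`. A standalone illustration using werkzeug's `MultiDict`, which backs Flask's `request.args`:

```python
from werkzeug.datastructures import MultiDict

args = MultiDict([('bbox', '1')])    # simulates the query string ?bbox=1
print(args.get('bbox', type=int))    # 1    -> truthy, overlay is drawn
print(args.get('motion', type=int))  # None -> falsy, overlay is skipped
```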
@@ -235,11 +251,11 @@ def latest_frame(camera_name):
     else:
         return "Camera named {} not found".format(camera_name), 404
         
-def imagestream(detected_frames_processor, camera_name, fps, height):
+def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
     while True:
         # max out at specified FPS
         time.sleep(1/fps)
-        frame = detected_frames_processor.get_current_frame(camera_name, draw=True)
+        frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
         if frame is None:
             frame = np.zeros((height,int(height*16/9),3), np.uint8)
 
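The hunk ends before `imagestream`'s yield. For context, the usual shape of such an MJPEG generator is sketched below; this is the common pattern, not necessarily the exact code elided from this file (`get_frame` is a stand-in):

```python
import time
import cv2

def mjpeg_parts(get_frame, fps):
    """Yield chunks for a multipart/x-mixed-replace; boundary=frame response."""
    while True:
        time.sleep(1 / fps)                     # cap the stream at the requested FPS
        frame = get_frame()                     # BGR ndarray, as in the diff above
        ret, jpg = cv2.imencode('.jpg', frame)  # JPEG-encode the frame
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
```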

+ 1 - 1
frigate/motion.py

@@ -70,7 +70,7 @@ class MotionDetector():
                 contour_area = cv2.contourArea(c)
                 if contour_area > self.config.contour_area:
                     x, y, w, h = cv2.boundingRect(c)
-                    motion_boxes.append((x*self.resize_factor, y*self.resize_factor, (x+w)*self.resize_factor, (y+h)*self.resize_factor))
+                    motion_boxes.append((int(x*self.resize_factor), int(y*self.resize_factor), int((x+w)*self.resize_factor), int((y+h)*self.resize_factor)))
         
         if len(motion_boxes) > 0:
             self.motion_frame_count += 1
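
The added `int()` casts matter because `resize_factor` is generally a float, and OpenCV drawing functions such as the `cv2.rectangle` call this commit adds for motion boxes reject float coordinates. A toy illustration (all sizes made up):

```python
# Motion detection runs on a downscaled frame; boxes get scaled back up.
resize_factor = 1280 / 300        # a float in general (made-up frame widths)
x, y, w, h = 10, 20, 5, 8         # a contour's bounding rect in the small frame

box = (x * resize_factor, y * resize_factor,
       (x + w) * resize_factor, (y + h) * resize_factor)
print(box)                        # floats -- cv2.rectangle() would raise on these

box = tuple(int(v) for v in box)  # the fix applied in this commit
print(box)                        # (42, 85, 64, 119): safe to draw
```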

+ 32 - 19
frigate/object_processing.py

@@ -250,15 +250,17 @@ class CameraState():
         self.previous_frame_id = None
         self.callbacks = defaultdict(lambda: [])
 
-    def get_current_frame(self, draw=False):
+    def get_current_frame(self, draw_options={}):
         with self.current_frame_lock:
             frame_copy = np.copy(self._current_frame)
             frame_time = self.current_frame_time
             tracked_objects = {k: v.to_dict() for k,v in self.tracked_objects.items()}
+            motion_boxes = self.motion_boxes.copy()
+            regions = self.regions.copy()
         
         frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)
         # draw on the frame
-        if draw:
+        if draw_options.get('bounding_boxes'):
             # draw the bounding boxes on the frame
             for obj in tracked_objects.values():
                 thickness = 2
@@ -271,19 +273,28 @@ class CameraState():
                 # draw the bounding boxes on the frame
                 box = obj['box']
                 draw_box_with_label(frame_copy, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
-                # draw the regions on the frame
-                region = obj['region']
-                cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)
-            
-            if self.camera_config.snapshots.show_timestamp:
-                time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
-                cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
-
-            if self.camera_config.snapshots.draw_zones:
-                for name, zone in self.camera_config.zones.items():
-                    thickness = 8 if any([name in obj['current_zones'] for obj in tracked_objects.values()]) else 2
-                    cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
         
+        if draw_options.get('regions'):
+            for region in regions:
+                cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 2)
+        
+        if draw_options.get('timestamp'):
+            time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
+            cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
+
+        if draw_options.get('zones'):
+            for name, zone in self.camera_config.zones.items():
+                thickness = 8 if any([name in obj['current_zones'] for obj in tracked_objects.values()]) else 2
+                cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
+        
+        if draw_options.get('mask'):
+            mask_overlay = np.where(self.camera_config.mask==[0])
+            frame_copy[mask_overlay] = [0,0,0]
+        
+        if draw_options.get('motion_boxes'):
+            for m_box in motion_boxes:
+                cv2.rectangle(frame_copy, (m_box[0], m_box[1]), (m_box[2], m_box[3]), (0,0,255), 2)
+
         return frame_copy
 
     def finished(self, obj_id):
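
The mask overlay works by collecting the coordinates of every zero-valued mask pixel with `np.where` and blacking those pixels out in the frame. A self-contained sketch with toy shapes (the real mask comes from `camera_config.mask`):

```python
import numpy as np

frame = np.full((4, 4, 3), 255, dtype=np.uint8)  # toy all-white BGR frame
mask = np.zeros((4, 4), dtype=np.uint8)          # 0 = masked out
mask[:, 1:] = 255                                # keep everything but column 0

frame[np.where(mask == 0)] = [0, 0, 0]           # black out masked pixels
print(frame[:, 0])                               # first column is now black
print(frame[:, 1])                               # the rest is untouched
```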
@@ -292,8 +303,10 @@ class CameraState():
     def on(self, event_type: str, callback: Callable[[Dict], None]):
         self.callbacks[event_type].append(callback)
 
-    def update(self, frame_time, current_detections):
+    def update(self, frame_time, current_detections, motion_boxes, regions):
         self.current_frame_time = frame_time
+        self.motion_boxes = motion_boxes
+        self.regions = regions
         # get the new frame
         frame_id = f"{self.name}{frame_time}"
         current_frame = self.frame_manager.get(frame_id, self.camera_config.frame_shape_yuv)
@@ -453,8 +466,8 @@ class TrackedObjectProcessor(threading.Thread):
         else:
             return {}
     
-    def get_current_frame(self, camera, draw=False):
-        return self.camera_states[camera].get_current_frame(draw)
+    def get_current_frame(self, camera, draw_options={}):
+        return self.camera_states[camera].get_current_frame(draw_options)
 
     def run(self):
         while True:
@@ -463,13 +476,13 @@ class TrackedObjectProcessor(threading.Thread):
                 break
 
             try:
-                camera, frame_time, current_tracked_objects = self.tracked_objects_queue.get(True, 10)
+                camera, frame_time, current_tracked_objects, motion_boxes, regions = self.tracked_objects_queue.get(True, 10)
             except queue.Empty:
                 continue
 
             camera_state = self.camera_states[camera]
 
-            camera_state.update(frame_time, current_tracked_objects)
+            camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)
 
             # update zone counts for each label
             # for each zone in the current camera

+ 3 - 1
frigate/video.py

@@ -388,6 +388,8 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
                         region = calculate_region(frame_shape, 
                             box[0], box[1],
                             box[2], box[3])
+
+                        regions.append(region)
                         
                         selected_objects.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters, mask))
 
@@ -411,6 +413,6 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
         else:
           fps_tracker.update()
           fps.value = fps_tracker.eps()
-          detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects))
+          detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, motion_boxes, regions))
           detection_fps.value = object_detector.fps.eps()
           frame_manager.close(f"{camera_name}{frame_time}")
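
Net effect of the video.py changes: every queue item now carries the motion boxes and the regions detection actually ran on, so the object processor can draw them on demand. A minimal sketch of the widened tuple contract (camera name and coordinates are toy values):

```python
import multiprocessing as mp

q = mp.Queue()

# Producer side (frigate/video.py): two extra fields per frame.
q.put(('back', 1234567890.0, {},       # camera, frame_time, tracked objects
       [(40, 80, 120, 200)],           # motion boxes as (x1, y1, x2, y2)
       [(0, 0, 300, 300)]))            # detection regions as (x1, y1, x2, y2)

# Consumer side (frigate/object_processing.py): unpacked in the same order.
camera, frame_time, tracked, motion_boxes, regions = q.get(True, 10)
print(camera, motion_boxes, regions)
```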