
use yuv420p pixel format for motion

Blake Blackshear committed 4 years ago
commit a611cbb942
5 changed files with 19 additions and 13 deletions:

  1. config/config.example.yml (+1 -1)
  2. detect_objects.py (+1 -1)
  3. frigate/motion.py (+9 -6)
  4. frigate/object_processing.py (+1 -1)
  5. frigate/video.py (+7 -4)

config/config.example.yml (+1 -1)

@@ -66,7 +66,7 @@ save_clips:
 #     - -f
 #     - rawvideo
 #     - -pix_fmt
-#     - rgb24
+#     - yuv420p
 
 ####################
 # Global object configuration. Applies to all cameras
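
Reviewer note: the switch from rgb24 to yuv420p halves the size of every raw frame ffmpeg writes to the pipe, because I420 stores a full-resolution luma plane plus two chroma planes subsampled 2x in each dimension. A quick back-of-the-envelope check (plain arithmetic, no project code assumed):

    # Raw frame sizes for a 1920x1080 stream.
    width, height = 1920, 1080

    rgb24_bytes = width * height * 3           # 3 bytes per pixel
    yuv420p_bytes = width * height * 3 // 2    # Y plane + quarter-size U and V planes

    print(rgb24_bytes)    # 6220800
    print(yuv420p_bytes)  # 3110400 -- half the pipe and shared-memory bandwidth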

detect_objects.py (+1 -1)

@@ -55,7 +55,7 @@ FFMPEG_DEFAULT_CONFIG = {
          '-use_wallclock_as_timestamps', '1']),
     'output_args': FFMPEG_CONFIG.get('output_args',
         ['-f', 'rawvideo',
-         '-pix_fmt', 'rgb24'])
+         '-pix_fmt', 'yuv420p'])
 }
 
 GLOBAL_OBJECT_CONFIG = CONFIG.get('objects', {})
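
With the new output format, every consumer of the pipe must read frame_shape[0] * frame_shape[1] * 3 // 2 bytes per frame and view the buffer as a single-channel (h*3//2, w) image. A minimal sketch of that read loop, assuming an ffmpeg binary on PATH and a placeholder input URL (not the project's actual capture code):

    import subprocess
    import numpy as np

    width, height = 1280, 720
    frame_size = width * height * 3 // 2  # bytes per yuv420p frame

    proc = subprocess.Popen(
        ['ffmpeg', '-i', 'rtsp://example/stream',  # placeholder source
         '-f', 'rawvideo', '-pix_fmt', 'yuv420p', 'pipe:'],
        stdout=subprocess.PIPE)

    raw = proc.stdout.read(frame_size)
    # The whole I420 buffer as one single-channel image: Y plane on top,
    # U and V planes stacked below it.
    frame = np.frombuffer(raw, np.uint8).reshape((height * 3 // 2, width))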

frigate/motion.py (+9 -6)

@@ -4,6 +4,7 @@ import numpy as np
 
 class MotionDetector():
     def __init__(self, frame_shape, mask, resize_factor=4):
+        self.frame_shape = frame_shape
         self.resize_factor = resize_factor
         self.motion_frame_size = (int(frame_shape[0]/resize_factor), int(frame_shape[1]/resize_factor))
         self.avg_frame = np.zeros(self.motion_frame_size, np.float)
@@ -16,14 +17,16 @@ class MotionDetector():
     def detect(self, frame):
         motion_boxes = []
 
+        gray = frame[0:self.frame_shape[0], 0:self.frame_shape[1]]
+
         # resize frame
-        resized_frame = cv2.resize(frame, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
+        resized_frame = cv2.resize(gray, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
 
         # convert to grayscale
-        gray = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2GRAY)
+        # resized_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2GRAY)
 
         # mask frame
-        gray[self.mask] = [255]
+        resized_frame[self.mask] = [255]
 
         # it takes ~30 frames to establish a baseline
         # dont bother looking for motion
@@ -31,7 +34,7 @@ class MotionDetector():
             self.frame_counter += 1
         else:
             # compare to average
-            frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg_frame))
+            frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))
 
             # compute the average delta over the past few frames
             # the alpha value can be modified to configure how sensitive the motion detection is.
@@ -70,10 +73,10 @@ class MotionDetector():
             # TODO: this really depends on FPS
             if self.motion_frame_count >= 10:
                 # only average in the current frame if the difference persists for at least 3 frames
-                cv2.accumulateWeighted(gray, self.avg_frame, 0.2)
+                cv2.accumulateWeighted(resized_frame, self.avg_frame, 0.2)
         else:
             # when no motion, just keep averaging the frames together
-            cv2.accumulateWeighted(gray, self.avg_frame, 0.2)
+            cv2.accumulateWeighted(resized_frame, self.avg_frame, 0.2)
             self.motion_frame_count = 0
 
         return motion_boxes
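
The new slice at the top of detect() is the heart of this change: in an I420 buffer the first frame_shape[0] rows are the luma (Y) plane, which is already a grayscale image, so the old cv2.cvtColor(..., COLOR_BGR2GRAY) call becomes a zero-copy array view. A small self-contained check on synthetic data (assuming OpenCV's I420-to-gray conversion is a plain Y-plane copy, which it should be):

    import cv2
    import numpy as np

    height, width = 480, 640
    yuv = np.random.randint(0, 255, (height * 3 // 2, width), np.uint8)

    y_plane = yuv[0:height, 0:width]                   # zero-copy view of the luma plane
    gray = cv2.cvtColor(yuv, cv2.COLOR_YUV2GRAY_I420)  # OpenCV's extraction

    assert np.array_equal(y_plane, gray)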

frigate/object_processing.py (+1 -1)

@@ -93,7 +93,7 @@ class CameraState():
         # get the new frame and delete the old frame
         frame_id = f"{self.name}{frame_time}"
         with self.current_frame_lock:
-            self._current_frame = self.frame_manager.get(frame_id, self.config['frame_shape'])
+            self._current_frame = self.frame_manager.get(frame_id, (self.config['frame_shape'][0]*3//2, self.config['frame_shape'][1]))
             if not self.previous_frame_id is None:
                 self.frame_manager.delete(self.previous_frame_id)
             self.previous_frame_id = frame_id
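
Every frame_manager.get() call now needs the I420 buffer shape rather than the decoded image shape. If this (h*3//2, w) arithmetic keeps spreading, a small helper would keep it in one place; a hypothetical sketch, not part of this commit:

    def yuv420p_shape(frame_shape):
        # I420: a full-resolution Y plane plus two quarter-resolution chroma
        # planes, stored as one single-channel image 1.5x the frame height.
        return (frame_shape[0] * 3 // 2, frame_shape[1])

    assert yuv420p_shape((1080, 1920)) == (1620, 1920)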

frigate/video.py (+7 -4)

@@ -120,7 +120,7 @@ def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: Fram
     stop_event: mp.Event, current_frame: mp.Value):
 
     frame_num = 0
-    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
+    frame_size = frame_shape[0] * frame_shape[1] * 3 // 2
     skipped_fps.start()
     while True:
         if stop_event.is_set():
@@ -276,7 +276,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
         
         current_frame_time.value = frame_time
 
-        frame = frame_manager.get(f"{camera_name}{frame_time}", frame_shape)
+        frame = frame_manager.get(f"{camera_name}{frame_time}", (frame_shape[0]*3//2, frame_shape[1]))
 
         if frame is None:
             print(f"{camera_name}: frame {frame_time} is not in memory store.")
@@ -304,10 +304,13 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
         regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.0)
             for a in combined_regions]
         
+        if len(regions) > 0:
+            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
+
         # resize regions and detect
         detections = []
         for region in regions:
-            detections.extend(detect(object_detector, frame, region, objects_to_track, object_filters, mask))
+            detections.extend(detect(object_detector, rgb_frame, region, objects_to_track, object_filters, mask))
         
         #########
         # merge objects, check for clipped objects and look again up to 4 times
@@ -340,7 +343,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
                             box[0], box[1],
                             box[2], box[3])
                         
-                        selected_objects.extend(detect(object_detector, frame, region, objects_to_track, object_filters, mask))
+                        selected_objects.extend(detect(object_detector, rgb_frame, region, objects_to_track, object_filters, mask))
 
                         refining = True
                     else:
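
The RGB conversion now happens at most once per frame and only when motion produced candidate regions, so idle frames never pay for a color conversion. The refinement loop can safely reuse rgb_frame because it only runs when the first pass returned detections, which requires regions to be non-empty. The conversion in isolation, on a synthetic 1080p buffer:

    import cv2
    import numpy as np

    height, width = 1080, 1920
    yuv = np.zeros((height * 3 // 2, width), np.uint8)

    rgb = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB_I420)
    assert rgb.shape == (height, width, 3)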