
improve frame memory management

Blake Blackshear, 4 years ago
commit 35ba5e2f7c
3 changed files with 29 additions and 25 deletions
1. README.md (+2 -2)
2. detect_objects.py (+1 -1)
3. frigate/video.py (+26 -22)

README.md (+2 -2)

@@ -127,11 +127,11 @@ lxc.cap.drop:
 ```
 
 ### Calculating shm-size
-The default shm-size of 64m should be fine for most setups. If you start seeing segfault errors, it could be because you have too many high resolution cameras and you need to specify a higher shm size.
+The default shm-size of 64m is fine for setups with 3 or less 1080p cameras. If frigate is exiting with "Bus error" messages, it could be because you have too many high resolution cameras and you need to specify a higher shm size.
 
 You can calculate the necessary shm-size for each camera with the following formula:
 ```
-(width * height * 3 + 270480)/1048576 = <shm size in mb>
+(width * height * 1.5 * 7 + 270480)/1048576 = <shm size in mb>
 ```
 [Back to top](#documentation)
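As a quick sanity check of the revised formula (the 1.5 factor matches yuv420p's 1.5 bytes per pixel; treating the 7 as a per-camera frame count is an assumption read off the formula, not stated in the diff), here is a small Python sketch:

```python
def shm_size_mb(width: int, height: int, frames: int = 7, overhead: int = 270480) -> float:
    """Per-camera shm estimate from the README formula: yuv420p frames take
    width * height * 1.5 bytes, times an assumed 7 frames in flight, plus a
    fixed overhead, converted to MB."""
    return (width * height * 1.5 * frames + overhead) / 1048576

print(round(shm_size_mb(1920, 1080), 1))  # ~21.0 MB for a single 1080p camera
```

Three 1080p cameras work out to roughly 63 MB, which lines up with the new claim that the default 64m shm-size covers about three 1080p cameras.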
 

detect_objects.py (+1 -1)

@@ -185,7 +185,7 @@ def main():
         config['zones'] = config.get('zones', {})
 
     # Queue for cameras to push tracked objects to
-    tracked_objects_queue = mp.Queue()
+    tracked_objects_queue = mp.Queue(maxsize=len(CONFIG['cameras'].keys())*2)
 
     # Queue for clip processing
     event_queue = mp.Queue()
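Bounding tracked_objects_queue to two entries per camera is what lets the producer side drop frames instead of letting results pile up without limit. A minimal sketch of the pattern, with a hypothetical camera config standing in for CONFIG['cameras']:

```python
import multiprocessing as mp

# Hypothetical camera config standing in for CONFIG['cameras'].
cameras = {"back_yard": {}, "front_door": {}}

# Two in-flight results per camera; producers check .full() and drop the
# frame (freeing its shared memory) rather than blocking on put().
tracked_objects_queue = mp.Queue(maxsize=len(cameras.keys()) * 2)

if not tracked_objects_queue.full():
    tracked_objects_queue.put(("back_yard", 1234567890.0, {}))
```

Note that multiprocessing.Queue.full() is only approximate, but for this purpose an occasional extra or dropped frame is acceptable.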

frigate/video.py (+26 -22)

@@ -127,34 +127,37 @@ def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: Fram
             print(f"{camera_name}: stop event set. exiting capture thread...")
             break
 
-        frame_bytes = ffmpeg_process.stdout.read(frame_size)
         current_frame.value = datetime.datetime.now().timestamp()
+        frame_name = f"{camera_name}{current_frame.value}"
+        frame_buffer = frame_manager.create(frame_name, frame_size)
+        try:
+          frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
+        except:
+          print(f"{camera_name}: ffmpeg sent a broken frame. something is wrong.")
 
-        if len(frame_bytes) < frame_size:
-            print(f"{camera_name}: ffmpeg sent a broken frame. something is wrong.")
-
-            if ffmpeg_process.poll() != None:
-                print(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
-                break
-            else:
-                continue
+          if ffmpeg_process.poll() != None:
+              print(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
+              frame_manager.delete(frame_name)
+              break
+          
+          continue
 
         fps.update()
 
         frame_num += 1
         if (frame_num % take_frame) != 0:
             skipped_fps.update()
+            frame_manager.delete(frame_name)
             continue
 
         # if the queue is full, skip this frame
         if frame_queue.full():
             skipped_fps.update()
+            frame_manager.delete(frame_name)
             continue
 
-        # put the frame in the frame manager
-        frame_buffer = frame_manager.create(f"{camera_name}{current_frame.value}", frame_size)
-        frame_buffer[:] = frame_bytes[:]
-        frame_manager.close(f"{camera_name}{current_frame.value}")
+        # close the frame
+        frame_manager.close(frame_name)
 
         # add to the queue
         frame_queue.put(current_frame.value)
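The capture loop now allocates the shared-memory buffer before reading from ffmpeg, writes into it directly, and deletes it on every skip path (broken frame, skipped frame, full queue), closing its handle only once the frame time is queued. The commit does not show the FrameManager itself, so the following is only a sketch of what a create/close/delete interface might look like, assuming a multiprocessing.shared_memory backing store rather than Frigate's actual implementation:

```python
from multiprocessing import shared_memory


class SharedMemoryFrameManager:
    """Illustrative frame store exposing the create/close/delete calls used above."""

    def __init__(self):
        self.handles = {}

    def create(self, name: str, size: int) -> memoryview:
        # Allocate a named segment that another process can open by name.
        shm = shared_memory.SharedMemory(name=name, create=True, size=size)
        self.handles[name] = shm
        return shm.buf

    def close(self, name: str) -> None:
        # Drop this process's handle but keep the segment alive for the consumer.
        shm = self.handles.pop(name, None)
        if shm is not None:
            shm.close()

    def delete(self, name: str) -> None:
        # The frame will never be consumed: release the memory entirely.
        shm = self.handles.pop(name, None)
        if shm is not None:
            shm.close()
            shm.unlink()
```

A consumer process would attach to the same segment with shared_memory.SharedMemory(name=...) and release it once it is finished with the frame.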
@@ -281,9 +284,6 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
         if frame is None:
             print(f"{camera_name}: frame {frame_time} is not in memory store.")
             continue
-        
-        fps_tracker.update()
-        fps.value = fps_tracker.eps()
 
         # look for motion
         motion_boxes = motion_detector.detect(frame)
@@ -355,9 +355,13 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
         # now that we have refined our detections, we need to track objects
         object_tracker.match_and_update(frame_time, detections)
 
-        # add to the queue
-        detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects))
-
-        detection_fps.value = object_detector.fps.eps()
-
-        frame_manager.close(f"{camera_name}{frame_time}")
+        # add to the queue if not full
+        if(detected_objects_queue.full()):
+          frame_manager.delete(f"{camera_name}{frame_time}")
+          continue
+        else:
+          fps_tracker.update()
+          fps.value = fps_tracker.eps()
+          detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects))
+          detection_fps.value = object_detector.fps.eps()
+          frame_manager.close(f"{camera_name}{frame_time}")
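Taken together, the two hunks establish a clear ownership rule: whichever side decides a frame will travel no further deletes it, while a successful put hands ownership to the consumer and only the local handle is closed. A self-contained toy illustration of that rule (the camera name and the in-process frame store are made up for the example):

```python
import queue

# Throwaway in-process stand-ins for the frame manager calls, just to show
# the ownership rule from the hunks above: delete on drop, close on handoff.
frames = {}

def create(name: str, size: int) -> bytearray:
    frames[name] = bytearray(size)
    return frames[name]

def close(name: str) -> None:
    pass  # producer is done writing; the consumer deletes the frame later

def delete(name: str) -> None:
    frames.pop(name, None)  # frame will never be consumed: reclaim it now

detected_objects_queue = queue.Queue(maxsize=2)  # stand-in for the mp.Queue

frame_name = "back_yard1234.5"   # hypothetical camera name + frame timestamp
buf = create(frame_name, 16)
buf[:] = b"\x00" * 16            # pretend these bytes came from ffmpeg stdout

if detected_objects_queue.full():
    delete(frame_name)           # consumer is behind: drop and free the frame
else:
    detected_objects_queue.put(("back_yard", 1234.5, {}))
    close(frame_name)            # handoff succeeded; the consumer now owns it
```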