
improve box merging and keep tracking

Blake Blackshear, 3 years ago
commit e36099a342
3 changed files with 54 additions and 33 deletions
  1. frigate/test/test_reduce_boxes.py (+27 -0)
  2. frigate/util.py (+1 -1)
  3. frigate/video.py (+26 -32)

frigate/test/test_reduce_boxes.py (+27 -0)

@@ -0,0 +1,27 @@
+import cv2
+import numpy as np
+from unittest import TestCase, main
+from frigate.video import box_overlaps, reduce_boxes
+
+
+class TestBoxOverlaps(TestCase):
+    def test_overlap(self):
+        assert box_overlaps((100, 100, 200, 200), (50, 50, 150, 150))
+
+    def test_overlap_2(self):
+        assert box_overlaps((50, 50, 150, 150), (100, 100, 200, 200))
+
+    def test_no_overlap(self):
+        assert not box_overlaps((100, 100, 200, 200), (250, 250, 350, 350))
+
+
+class TestReduceBoxes(TestCase):
+    def test_cluster(self):
+        clusters = reduce_boxes(
+            [(144, 290, 221, 459), (225, 178, 426, 341), (343, 105, 584, 250)]
+        )
+        assert len(clusters) == 2
+
+
+if __name__ == "__main__":
+    main(verbosity=2)
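
The new test module follows the standard unittest pattern, so it can also be driven through the default loader. A minimal sketch, assuming frigate and its dependencies (cv2, numpy) are importable from the repository root:

```python
# Roughly equivalent to `python -m unittest frigate.test.test_reduce_boxes -v`.
# Assumes frigate's dependencies (cv2, numpy) are installed.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName("frigate.test.test_reduce_boxes")
unittest.TextTestRunner(verbosity=2).run(suite)
```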

frigate/util.py (+1 -1)

@@ -191,7 +191,7 @@ def draw_box_with_label(
 
 def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2):
     # size is the longest edge and divisible by 4
-    size = int(max(xmax - xmin, ymax - ymin) // 4 * 4 * multiplier)
+    size = int((max(xmax - xmin, ymax - ymin) * multiplier) // 4 * 4)
     # dont go any smaller than 300
     if size < 300:
         size = 300
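
The one-line change to calculate_region moves the multiplier inside the floor division, so rounding down to a multiple of 4 now happens after scaling rather than before; with the old order, a non-integer multiplier could leave a size that is not actually divisible by 4. A rough illustration with made-up values (not taken from the commit):

```python
# Illustrative only: why the multiplier now sits inside the `// 4 * 4` rounding.
longest_edge = 102
multiplier = 1.5

old_size = int(longest_edge // 4 * 4 * multiplier)    # 102 // 4 * 4 = 100, * 1.5 -> 150 (not a multiple of 4)
new_size = int((longest_edge * multiplier) // 4 * 4)  # 153 // 4 * 4 -> 152 (multiple of 4)

assert new_size % 4 == 0
print(old_size, new_size)  # 150 152
```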

frigate/video.py (+26 -32)

@@ -379,26 +379,37 @@ def track_camera(
     logger.info(f"{name}: exiting subprocess")
 
 
+def box_overlaps(b1, b2):
+    if b1[2] < b2[0] or b1[0] > b2[2] or b1[1] > b2[3] or b1[3] < b2[1]:
+        return False
+    return True
+
+
 def reduce_boxes(boxes):
-    if len(boxes) == 0:
-        return []
-    reduced_boxes = cv2.groupRectangles(
-        [list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2
-    )[0]
-    return [tuple(b) for b in reduced_boxes]
+    clusters = []
+
+    for box in boxes:
+        matched = 0
+        for cluster in clusters:
+            if box_overlaps(box, cluster):
+                matched = 1
+                cluster[0] = min(cluster[0], box[0])
+                cluster[1] = min(cluster[1], box[1])
+                cluster[2] = max(cluster[2], box[2])
+                cluster[3] = max(cluster[3], box[3])
+
+        if not matched:
+            clusters.append(list(box))
+
+    return [tuple(c) for c in clusters]
 
 
-# modified from https://stackoverflow.com/a/40795835
 def intersects_any(box_a, boxes):
     for box in boxes:
-        if (
-            box_a[2] < box[0]
-            or box_a[0] > box[2]
-            or box_a[1] > box[3]
-            or box_a[3] < box[1]
-        ):
+        if box_overlaps(box_a, box):
             continue
         return True
+    return False
 
 
 def detect(
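
The rewritten reduce_boxes replaces cv2.groupRectangles with a single greedy pass: each box either expands every existing cluster it overlaps or starts a new cluster, and clusters are never merged with each other within one call. A quick sketch of the behavior on the boxes from the new test, assuming frigate.video and its dependencies import cleanly:

```python
# Boxes are (x_min, y_min, x_max, y_max), taken from frigate/test/test_reduce_boxes.py.
from frigate.video import box_overlaps, reduce_boxes

boxes = [(144, 290, 221, 459), (225, 178, 426, 341), (343, 105, 584, 250)]

print(box_overlaps(boxes[0], boxes[1]))  # False: 221 < 225, no horizontal overlap
print(box_overlaps(boxes[1], boxes[2]))  # True: the second and third boxes intersect
print(reduce_boxes(boxes))
# [(144, 290, 221, 459), (225, 105, 584, 341)] -- the overlapping pair is merged
```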
@@ -489,9 +500,7 @@ def process_frames(
 
         # only get the tracked object boxes that intersect with motion
         tracked_object_boxes = [
-            obj["box"]
-            for obj in object_tracker.tracked_objects.values()
-            if intersects_any(obj["box"], motion_boxes)
+            obj["box"] for obj in object_tracker.tracked_objects.values()
         ]
 
         # combine motion boxes with known locations of existing objects
@@ -503,15 +512,6 @@ def process_frames(
             for a in combined_boxes
         ]
 
-        # combine overlapping regions
-        combined_regions = reduce_boxes(regions)
-
-        # re-compute regions
-        regions = [
-            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.0)
-            for a in combined_regions
-        ]
-
         # resize regions and detect
         detections = []
         for region in regions:
@@ -582,14 +582,8 @@ def process_frames(
             if refining:
                 refine_count += 1
 
-        # Limit to the detections overlapping with motion areas
-        # to avoid picking up stationary background objects
-        detections_with_motion = [
-            d for d in detections if intersects_any(d[2], motion_boxes)
-        ]
-
         # now that we have refined our detections, we need to track objects
-        object_tracker.match_and_update(frame_time, detections_with_motion)
+        object_tracker.match_and_update(frame_time, detections)
 
         # add to the queue if not full
         if detected_objects_queue.full():
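
Taken together, the process_frames changes drop the second reduce_boxes/calculate_region pass over regions, keep every tracked object box in the region calculation (not only those intersecting motion), and hand all refined detections to the tracker instead of filtering them against motion first, which matches the "keep tracking" intent of the commit message. A minimal sketch of the resulting region step; only calculate_region and the inline comments mirror the diff, the boxes and frame shape are made up:

```python
# Hedged sketch of the simplified region computation after this commit.
from frigate.util import calculate_region

frame_shape = (720, 1280)                      # (height, width), illustrative
motion_boxes = [(100, 100, 200, 200)]          # from the motion detector
tracked_object_boxes = [(600, 300, 700, 420)]  # kept even without motion overlap

# combine motion boxes with known locations of existing objects
combined_boxes = motion_boxes + tracked_object_boxes

# one region per combined box; no merge / re-compute pass anymore
regions = [
    calculate_region(frame_shape, b[0], b[1], b[2], b[3], 1.0)
    for b in combined_boxes
]
print(regions)
```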