add color map to use different colors for different objects

Blake Blackshear, 5 years ago
commit 480175d70f
5 changed files with 41 additions and 33 deletions

  1. Dockerfile (+9, -8)
  2. frigate/object_detection.py (+2, -17)
  3. frigate/objects.py (+1, -2)
  4. frigate/util.py (+28, -4)
  5. frigate/video.py (+1, -2)
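
The core of the change: instead of a fixed blue box color, frigate/util.py now samples matplotlib's discrete tab10 colormap once per label and keeps the result in a COLOR_MAP dict. A minimal sketch of that lookup, using a hypothetical three-entry label map rather than the real /label_map.pbtext:

```python
import matplotlib.pyplot as plt

# Hypothetical labels; the real dict is read from /label_map.pbtext at import time.
labels = {0: 'person', 1: 'bicycle', 2: 'car'}

# Sample a discrete colormap with one entry per label, as the commit does.
cmap = plt.cm.get_cmap('tab10', len(labels))

# matplotlib returns RGBA floats in [0, 1]; keep the RGB part and scale to 0-255 tuples.
color_map = {name: tuple(int(round(255 * c)) for c in cmap(idx)[:3])
             for idx, name in labels.items()}

print(color_map['person'])  # (31, 119, 180), tab10's first color (#1f77b4)
```

Note that OpenCV treats color tuples as BGR while matplotlib returns RGB, so the hue actually rendered is the channel-swapped version of the matplotlib color; labels still get visually distinct colors either way.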

Dockerfile (+9, -8)

@@ -53,14 +53,6 @@ RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
  libva-drm2 libva2 i965-va-driver vainfo \
  && rm -rf /var/lib/apt/lists/* 
 
-# Install core packages 
-RUN wget -q -O /tmp/get-pip.py --no-check-certificate https://bootstrap.pypa.io/get-pip.py && python3 /tmp/get-pip.py
-RUN  pip install -U pip \
- numpy \
- Flask \
- paho-mqtt \
- PyYAML
-
 # Download & build OpenCV
 # TODO: use multistage build to reduce image size: 
 #   https://medium.com/@denismakogon/pain-and-gain-running-opencv-application-with-golang-and-docker-on-alpine-3-7-435aa11c7aec
@@ -101,6 +93,15 @@ RUN ln -s /coco_labels.txt /label_map.pbtext
 RUN (apt-get autoremove -y; \
      apt-get autoclean -y)
 
+# Install core packages 
+RUN wget -q -O /tmp/get-pip.py --no-check-certificate https://bootstrap.pypa.io/get-pip.py && python3 /tmp/get-pip.py
+RUN  pip install -U pip \
+ numpy \
+ Flask \
+ paho-mqtt \
+ PyYAML \
+ matplotlib
+
 WORKDIR /opt/frigate/
 ADD frigate frigate/
 COPY detect_objects.py .

frigate/object_detection.py (+2, -17)

@@ -4,22 +4,7 @@ import cv2
 import threading
 import numpy as np
 from edgetpu.detection.engine import DetectionEngine
-from . util import tonumpyarray
-
-# Path to frozen detection graph. This is the actual model that is used for the object detection.
-PATH_TO_CKPT = '/frozen_inference_graph.pb'
-# List of the strings that is used to add correct label for each box.
-PATH_TO_LABELS = '/label_map.pbtext'
-
-# Function to read labels from text files.
-def ReadLabelFile(file_path):
-    with open(file_path, 'r') as f:
-        lines = f.readlines()
-    ret = {}
-    for line in lines:
-        pair = line.strip().split(maxsplit=1)
-        ret[int(pair[0])] = pair[1].strip()
-    return ret
+from . util import tonumpyarray, LABELS, PATH_TO_CKPT
 
 class PreppedQueueProcessor(threading.Thread):
     def __init__(self, cameras, prepped_frame_queue):
@@ -30,7 +15,7 @@ class PreppedQueueProcessor(threading.Thread):
         
         # Load the edgetpu engine and labels
         self.engine = DetectionEngine(PATH_TO_CKPT)
-        self.labels = ReadLabelFile(PATH_TO_LABELS)
+        self.labels = LABELS
 
     def run(self):
         # process queue...
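
ReadLabelFile itself is unchanged, just relocated to frigate/util.py so the detector and the new color map share a single LABELS dict loaded once at import time. For reference, it parses one "index label" pair per line from /label_map.pbtext (which the Dockerfile symlinks to /coco_labels.txt); a tiny illustration with made-up lines:

```python
# Hypothetical label-map contents; the real file is the COCO label list in the image.
sample_lines = [
    "0  person",
    "1  bicycle",
    "2  car",
]

labels = {}
for line in sample_lines:
    pair = line.strip().split(maxsplit=1)   # same parsing as ReadLabelFile
    labels[int(pair[0])] = pair[1].strip()

print(labels)  # {0: 'person', 1: 'bicycle', 2: 'car'}
```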

frigate/objects.py (+1, -2)

@@ -73,9 +73,8 @@ class BestFrames(threading.Thread):
                 if obj['frame_time'] in recent_frames:
                     best_frame = recent_frames[obj['frame_time']] #, np.zeros((720,1280,3), np.uint8))
 
-                    label = "{}: {}% {}".format(name,int(obj['score']*100),int(obj['area']))
                     draw_box_with_label(best_frame, obj['xmin'], obj['ymin'], 
-                        obj['xmax'], obj['ymax'], label)
+                        obj['xmax'], obj['ymax'], obj['name'], obj['score'], obj['area'])
                     
                     # print a timestamp
                     time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")

frigate/util.py (+28, -4)

@@ -1,19 +1,31 @@
 import numpy as np
 import cv2
+import matplotlib.pyplot as plt
+
+# Function to read labels from text files.
+def ReadLabelFile(file_path):
+    with open(file_path, 'r') as f:
+        lines = f.readlines()
+    ret = {}
+    for line in lines:
+        pair = line.strip().split(maxsplit=1)
+        ret[int(pair[0])] = pair[1].strip()
+    return ret
 
 # convert shared memory array into numpy array
 def tonumpyarray(mp_arr):
     return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)
 
-def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label):
-    color = (255,0,0)
+def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, score, area):
+    color = COLOR_MAP[label]
+    display_text = "{}: {}% {}".format(label,int(score*100),int(area))
     cv2.rectangle(frame, (x_min, y_min), 
         (x_max, y_max), 
         color, 2)
     font_scale = 0.5
     font = cv2.FONT_HERSHEY_SIMPLEX
     # get the width and height of the text box
-    size = cv2.getTextSize(label, font, fontScale=font_scale, thickness=2)
+    size = cv2.getTextSize(display_text, font, fontScale=font_scale, thickness=2)
     text_width = size[0][0]
     text_height = size[0][1]
     line_height = text_height + size[1]
@@ -23,4 +35,16 @@ def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label):
     # make the coords of the box with a small padding of two pixels
     textbox_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y + line_height))
     cv2.rectangle(frame, textbox_coords[0], textbox_coords[1], color, cv2.FILLED)
-    cv2.putText(frame, label, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
+    cv2.putText(frame, display_text, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
+
+# Path to frozen detection graph. This is the actual model that is used for the object detection.
+PATH_TO_CKPT = '/frozen_inference_graph.pb'
+# List of the strings that is used to add correct label for each box.
+PATH_TO_LABELS = '/label_map.pbtext'
+
+LABELS = ReadLabelFile(PATH_TO_LABELS)
+cmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))
+
+COLOR_MAP = {}
+for key, val in LABELS.items():
+    COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])

frigate/video.py (+1, -2)

@@ -318,8 +318,7 @@ class Camera:
 
         # draw the bounding boxes on the screen
         for obj in detected_objects:
-            label = "{}: {}% {}".format(obj['name'],int(obj['score']*100),int(obj['area']))
-            draw_box_with_label(frame, obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'], label)
+            draw_box_with_label(frame, obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'], obj['name'], obj['score'], obj['area'])
 
         for region in self.regions:
             color = (255,255,255)
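
After this commit, callers pass the raw name, score, and area and let frigate.util build the display text and pick the per-label color. A minimal caller-side sketch with a hypothetical detection dict (it assumes /label_map.pbtext exists, since LABELS and COLOR_MAP are built when frigate.util is imported):

```python
import numpy as np
import cv2

from frigate.util import draw_box_with_label

# Hypothetical blank 720p frame and one detection, shaped like the dicts
# video.py and objects.py pass around.
frame = np.zeros((720, 1280, 3), np.uint8)
obj = {'name': 'person', 'score': 0.87, 'area': 12400,
       'xmin': 300, 'ymin': 200, 'xmax': 420, 'ymax': 560}

# The label text and box color are now handled inside util.
draw_box_with_label(frame, obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'],
                    obj['name'], obj['score'], obj['area'])

cv2.imwrite('/tmp/person_box.jpg', frame)
```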