@@ -34,7 +34,9 @@ class TrackedObjectProcessor(threading.Thread):
             'best_objects': {},
             'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')),
             'tracked_objects': {},
-            'current_frame_time': None
+            'current_frame_time': None,
+            'current_frame': np.zeros((720,1280,3), np.uint8),
+            'object_id': None
         })

     def get_best(self, camera, label):
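
The new placeholder entries matter downstream: `current_frame` starts out as a black 720x1280 array instead of `None`, so anything reading the per-camera state before the first frame arrives still gets a valid image, and `object_id` remembers which plasma object currently backs that frame so it can be released later. A minimal sketch of the same per-camera-state pattern in isolation (the `current_jpg_bytes` helper and the camera name are illustrative, not part of the diff):

```python
from collections import defaultdict

import cv2
import numpy as np

# per-camera state created lazily on first access, mirroring the diff above
camera_data = defaultdict(lambda: {
    'best_objects': {},
    'tracked_objects': {},
    'current_frame_time': None,
    # placeholder frame: consumers always get a real image, never None
    'current_frame': np.zeros((720, 1280, 3), np.uint8),
    # plasma ObjectID backing current_frame, so the previous one can be freed
    'object_id': None
})

def current_jpg_bytes(camera):
    # illustrative consumer: encode whatever frame is current for this camera
    ok, jpg = cv2.imencode('.jpg', camera_data[camera]['current_frame'])
    return jpg.tobytes() if ok else b''

# works even before any real frame has been stored for 'back'
print(len(current_jpg_bytes('back')))
```
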
@@ -64,33 +66,40 @@ class TrackedObjectProcessor(threading.Thread):
         object_id_hash = hashlib.sha1(str.encode(f"{camera}{frame_time}"))
         object_id_bytes = object_id_hash.digest()
         object_id = plasma.ObjectID(object_id_bytes)
-        current_frame = self.plasma_client.get(object_id)
+        current_frame = self.plasma_client.get(object_id, timeout_ms=0)

-        # draw the bounding boxes on the frame
-        for obj in tracked_objects.values():
-            thickness = 2
-            color = COLOR_MAP[obj['label']]
+        if not current_frame is plasma.ObjectNotAvailable:
+            # draw the bounding boxes on the frame
+            for obj in tracked_objects.values():
+                thickness = 2
+                color = COLOR_MAP[obj['label']]
+
+                if obj['frame_time'] != frame_time:
+                    thickness = 1
+                    color = (255,0,0)
+
+                # draw the bounding boxes on the frame
+                box = obj['box']
+                draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
+                # draw the regions on the frame
+                region = obj['region']
+                cv2.rectangle(current_frame, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)

-            if obj['frame_time'] != frame_time:
-                thickness = 1
-                color = (255,0,0)
+            if config['snapshots']['show_timestamp']:
+                time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
+                cv2.putText(current_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)

-            # draw the bounding boxes on the frame
-            box = obj['box']
-            draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
-            # draw the regions on the frame
-            region = obj['region']
-            cv2.rectangle(current_frame, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)
-
-        if config['snapshots']['show_timestamp']:
-            time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
-            cv2.putText(current_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
+            ###
+            # Set the current frame as ready
+            ###
+            self.camera_data[camera]['current_frame'] = current_frame
+            self.camera_data[camera]['current_frame_time'] = frame_time

-        ###
-        # Set the current frame as ready
-        ###
-        self.camera_data[camera]['current_frame'] = current_frame
-        self.camera_data[camera]['current_frame_time'] = frame_time
+            # store the object id, so you can delete it at the next loop
+            previous_object_id = self.camera_data[camera]['object_id']
+            if not previous_object_id is None:
+                self.plasma_client.delete([previous_object_id])
+            self.camera_data[camera]['object_id'] = object_id

         ###
         # Maintain the highest scoring recent object and frame for each label
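
The plasma handling above boils down to a get/check/release cycle: `get(..., timeout_ms=0)` returns immediately with `plasma.ObjectNotAvailable` when the frame is not (or no longer) in the store, drawing and state updates only happen when a real frame came back, and the previous frame's `ObjectID` is deleted so decoded frames do not pile up in shared memory. A minimal standalone sketch of that cycle, assuming a plasma store is already running and reachable at `/tmp/plasma` (the `fetch_frame` helper is illustrative, not Frigate's API):

```python
import hashlib

import pyarrow.plasma as plasma

# assumes a store was started with something like: plasma_store -m 400000000 -s /tmp/plasma
client = plasma.connect("/tmp/plasma")
previous_object_id = None

def frame_object_id(camera, frame_time):
    # same id scheme as the diff: sha1(f"{camera}{frame_time}") -> 20-byte plasma ObjectID
    return plasma.ObjectID(hashlib.sha1(str.encode(f"{camera}{frame_time}")).digest())

def fetch_frame(camera, frame_time):
    global previous_object_id
    object_id = frame_object_id(camera, frame_time)

    # non-blocking fetch: returns the sentinel instead of waiting for the producer
    current_frame = client.get(object_id, timeout_ms=0)
    if current_frame is plasma.ObjectNotAvailable:
        return None

    # release the frame fetched last time so the store does not grow unbounded
    if previous_object_id is not None:
        client.delete([previous_object_id])
    previous_object_id = object_id
    return current_frame
```

Deleting only the previous id keeps at most one fetched frame per consumer pinned in the store at a time, which is what the new `object_id` slot in `camera_data` is tracking.
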
@@ -104,10 +113,10 @@ class TrackedObjectProcessor(threading.Thread):
                 # if the object is a higher score than the current best score
                 # or the current object is more than 1 minute old, use the new object
                 if obj['score'] > best_objects[obj['label']]['score'] or (now - best_objects[obj['label']]['frame_time']) > 60:
-                    obj['frame'] = np.copy(current_frame)
+                    obj['frame'] = np.copy(self.camera_data[camera]['current_frame'])
                     best_objects[obj['label']] = obj
             else:
-                obj['frame'] = np.copy(current_frame)
+                obj['frame'] = np.copy(self.camera_data[camera]['current_frame'])
                 best_objects[obj['label']] = obj

         ###
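
The last hunk switches the snapshot source from the local `current_frame` to `self.camera_data[camera]['current_frame']`: after the refactor above, the local variable can be `plasma.ObjectNotAvailable`, while the per-camera entry always holds the last good frame. The `np.copy` is presumably defensive: the best-object snapshot should own its pixels rather than reference a frame buffer that may later be modified. A tiny illustration of the aliasing hazard `np.copy` guards against, with toy arrays (not Frigate data):

```python
import numpy as np

current_frame = np.zeros((4, 4, 3), np.uint8)

best_by_reference = current_frame          # same array object, not a snapshot
best_by_copy = np.copy(current_frame)      # independent snapshot of the pixels

current_frame[:] = 255                     # a later frame overwrites the buffer in place

print(best_by_reference.max())  # 255 -- the "saved" frame was clobbered
print(best_by_copy.max())       # 0   -- the copied snapshot is unchanged
```
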