process_clip.py

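"""Run recorded clips through Frigate's detection pipeline offline.

Each clip is decoded with ffmpeg, run through motion detection, object
detection, and tracking, then scored for whether the requested label was
detected as a true positive. Useful for tuning object thresholds.
"""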
import click
import os
import datetime
from frigate.video import process_frames, start_or_restart_ffmpeg, capture_frames, get_frame_shape
from frigate.util import DictFrameManager, EventsPerSecond, draw_box_with_label
from frigate.motion import MotionDetector
from frigate.edgetpu import LocalObjectDetector
from frigate.objects import ObjectTracker
from frigate.object_processing import CameraState
import multiprocessing as mp
import numpy as np
import cv2


class ProcessClip:
    def __init__(self, clip_path, frame_shape, config):
        self.clip_path = clip_path
        self.frame_shape = frame_shape
        self.camera_name = 'camera'
        self.frame_manager = DictFrameManager()
        self.frame_queue = mp.Queue()
        self.detected_objects_queue = mp.Queue()
        self.camera_state = CameraState(self.camera_name, config, self.frame_manager)
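
    # Decode the clip with ffmpeg into raw rgb24 frames and feed them through
    # capture_frames() onto the frame queue, mimicking a live camera source.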
    def load_frames(self):
        fps = EventsPerSecond()
        skipped_fps = EventsPerSecond()
        stop_event = mp.Event()
        # a detection frame timestamp far in the future ensures no frames are skipped
        detection_frame = mp.Value('d', datetime.datetime.now().timestamp() + 100000)
        current_frame = mp.Value('d', 0.0)
        ffmpeg_cmd = f"ffmpeg -hide_banner -loglevel panic -i {self.clip_path} -f rawvideo -pix_fmt rgb24 pipe:".split(" ")
        ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, self.frame_shape[0] * self.frame_shape[1] * self.frame_shape[2])
        capture_frames(ffmpeg_process, self.camera_name, self.frame_shape, self.frame_manager, self.frame_queue, 1,
                       fps, skipped_fps, stop_event, detection_frame, current_frame)
        ffmpeg_process.wait()
        ffmpeg_process.communicate()
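
    # Run motion detection, object detection, and object tracking over every
    # queued frame; results land on detected_objects_queue.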
    def process_frames(self, objects_to_track=['person'], object_filters={}):
        mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
        mask[:] = 255
        motion_detector = MotionDetector(self.frame_shape, mask)
        object_detector = LocalObjectDetector(labels='/labelmap.txt')
        object_tracker = ObjectTracker(10)
        process_fps = EventsPerSecond()
        current_frame = mp.Value('d', 0.0)
        stop_event = mp.Event()

        # this calls the module-level process_frames imported from frigate.video,
        # not this method; exit_on_empty makes it return once the queue drains
        process_frames(self.camera_name, self.frame_queue, self.frame_shape, self.frame_manager,
                       motion_detector, object_detector, object_tracker, self.detected_objects_queue,
                       process_fps, current_frame, objects_to_track, object_filters, mask, stop_event,
                       exit_on_empty=True)
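
    # Replay the detection results through CameraState and report whether any
    # tracked object passed false positive filtering, plus the best score seen.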
    def objects_found(self, debug_path=None):
        obj_detected = False
        top_computed_score = 0.0

        def handle_event(name, obj):
            nonlocal obj_detected
            nonlocal top_computed_score
            if obj['computed_score'] > top_computed_score:
                top_computed_score = obj['computed_score']
            if not obj['false_positive']:
                obj_detected = True

        self.camera_state.on('new', handle_event)
        self.camera_state.on('update', handle_event)

        while not self.detected_objects_queue.empty():
            camera_name, frame_time, current_tracked_objects = self.detected_objects_queue.get()
            if debug_path is not None:
                self.save_debug_frame(debug_path, frame_time, current_tracked_objects.values())
            self.camera_state.update(frame_time, current_tracked_objects)

        return {
            'object_detected': obj_detected,
            'top_score': top_computed_score
        }
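
    # Write an annotated copy of a frame to debug_path: yellow boxes for
    # detections from this frame, red for boxes carried over from an earlier
    # frame, green for the detection regions.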
    def save_debug_frame(self, debug_path, frame_time, tracked_objects):
        current_frame = self.frame_manager.get(f"{self.camera_name}{frame_time}")
        # draw the bounding boxes on the frame
        for obj in tracked_objects:
            thickness = 2
            if obj['frame_time'] != frame_time:
                # detection carried over from an earlier frame: thinner box
                thickness = 1
                color = (255, 0, 0)
            else:
                color = (255, 255, 0)
            box = obj['box']
            draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'],
                                f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
            # draw the region on the frame
            region = obj['region']
            draw_box_with_label(current_frame, region[0], region[1], region[2], region[3],
                                'region', "", thickness=1, color=(0, 255, 0))
        cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg",
                    cv2.cvtColor(current_frame, cv2.COLOR_RGB2BGR))
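

# CLI entrypoint: run one clip, or every clip in a directory, through the
# pipeline and summarize how many produced a detection.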
@click.command()
@click.option("-p", "--path", required=True, help="Path to clip or directory to test.")
@click.option("-l", "--label", default='person', help="Label name to detect.")
@click.option("-t", "--threshold", default=0.85, help="Threshold value for objects.")
@click.option("--debug-path", default=None, help="Path to output frames for debugging.")
def process(path, label, threshold, debug_path):
    clips = []
    if os.path.isdir(path):
        files = os.listdir(path)
        files.sort()
        clips = [os.path.join(path, file) for file in files]
    elif os.path.isfile(path):
        clips.append(path)

    # bail out early so the summary below never divides by zero
    if not clips:
        print(f"No clips found at {path}.")
        return
    config = {
        'snapshots': {
            'show_timestamp': False,
            'draw_zones': False
        },
        'zones': {},
        'objects': {
            'track': [label],
            'filters': {
                # apply the threshold to whichever label is under test,
                # not just 'person'
                label: {
                    'threshold': threshold
                }
            }
        }
    }
    results = []
    for c in clips:
        frame_shape = get_frame_shape(c)
        process_clip = ProcessClip(c, frame_shape, config)
        process_clip.load_frames()
        process_clip.process_frames(objects_to_track=config['objects']['track'])
        results.append((c, process_clip.objects_found(debug_path)))

    for result in results:
        print(f"{result[0]}: {result[1]}")

    positive_count = sum(1 for result in results if result[1]['object_detected'])
    print(f"Objects were detected in {positive_count}/{len(results)} ({positive_count/len(results)*100:.2f}%) clip(s).")


if __name__ == '__main__':
    process()
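

# Example invocation (paths are illustrative):
#   python process_clip.py -p /path/to/clip.mp4 -l person -t 0.85 --debug-path /tmp/frames
#   python process_clip.py -p /path/to/clips_dir/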