process_clip.py

import csv
import json
import logging
import multiprocessing as mp
import os
import subprocess as sp
import sys

import click
import cv2
import numpy as np

sys.path.append("/lab/frigate")

from frigate.config import FrigateConfig
from frigate.edgetpu import LocalObjectDetector
from frigate.motion import MotionDetector
from frigate.object_processing import CameraState
from frigate.objects import ObjectTracker
from frigate.util import (
    EventsPerSecond,
    SharedMemoryFrameManager,
    draw_box_with_label,
)
from frigate.video import capture_frames, process_frames, start_or_restart_ffmpeg

logging.basicConfig()
logging.root.setLevel(logging.DEBUG)

logger = logging.getLogger(__name__)
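

# Determine the input resolution with ffprobe, falling back to OpenCV if
# ffprobe does not report usable stream dimensions.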
def get_frame_shape(source):
    ffprobe_cmd = [
        "ffprobe",
        "-v",
        "panic",
        "-show_error",
        "-show_streams",
        "-of",
        "json",
        source,
    ]
    p = sp.run(ffprobe_cmd, capture_output=True)
    info = json.loads(p.stdout)

    video_info = [s for s in info["streams"] if s["codec_type"] == "video"][0]

    if video_info["height"] != 0 and video_info["width"] != 0:
        return (video_info["height"], video_info["width"], 3)

    # fallback to using opencv if ffprobe didn't succeed
    video = cv2.VideoCapture(source)
    ret, frame = video.read()
    frame_shape = frame.shape
    video.release()
    return frame_shape
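

# Runs a single clip through the same capture -> motion -> detect -> track
# pipeline Frigate uses for a live camera, so detection behavior can be
# evaluated offline against recorded footage.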
class ProcessClip:
    def __init__(self, clip_path, frame_shape, config: FrigateConfig):
        self.clip_path = clip_path
        self.camera_name = "camera"
        self.config = config
        self.camera_config = self.config.cameras["camera"]
        self.frame_shape = self.camera_config.frame_shape
        self.ffmpeg_cmd = [
            c["cmd"] for c in self.camera_config.ffmpeg_cmds if "detect" in c["roles"]
        ][0]
        self.frame_manager = SharedMemoryFrameManager()
        self.frame_queue = mp.Queue()
        self.detected_objects_queue = mp.Queue()
        self.camera_state = CameraState(self.camera_name, config, self.frame_manager)
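
    # Decode the clip with ffmpeg and feed raw YUV frames into the shared
    # frame queue until the video is exhausted.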
    def load_frames(self):
        fps = EventsPerSecond()
        skipped_fps = EventsPerSecond()
        current_frame = mp.Value("d", 0.0)
        frame_size = (
            self.camera_config.frame_shape_yuv[0]
            * self.camera_config.frame_shape_yuv[1]
        )
        ffmpeg_process = start_or_restart_ffmpeg(
            self.ffmpeg_cmd, logger, sp.DEVNULL, frame_size
        )
        capture_frames(
            ffmpeg_process,
            self.camera_name,
            self.camera_config.frame_shape_yuv,
            self.frame_manager,
            self.frame_queue,
            fps,
            skipped_fps,
            current_frame,
        )
        ffmpeg_process.wait()
        ffmpeg_process.communicate()
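
    # Run motion detection, object detection, and tracking over the queued
    # frames, mirroring Frigate's live processing loop.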
    def process_frames(
        self, object_detector, objects_to_track=["person"], object_filters={}
    ):
        mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
        mask[:] = 255
        motion_detector = MotionDetector(self.frame_shape, self.camera_config.motion)
        motion_detector.save_images = False

        object_tracker = ObjectTracker(self.camera_config.detect)

        process_info = {
            "process_fps": mp.Value("d", 0.0),
            "detection_fps": mp.Value("d", 0.0),
            "detection_frame": mp.Value("d", 0.0),
        }

        detection_enabled = mp.Value("d", 1)
        stop_event = mp.Event()
        model_shape = (self.config.model.height, self.config.model.width)

        process_frames(
            self.camera_name,
            self.frame_queue,
            self.frame_shape,
            model_shape,
            self.camera_config.detect,
            self.frame_manager,
            motion_detector,
            object_detector,
            object_tracker,
            self.detected_objects_queue,
            process_info,
            objects_to_track,
            object_filters,
            detection_enabled,
            stop_event,
            exit_on_empty=True,
        )
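
    # Drain the detected objects queue and aggregate per-clip statistics,
    # optionally writing annotated debug frames along the way.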
    def stats(self, debug_path=None):
        total_regions = 0
        total_motion_boxes = 0
        object_ids = set()
        total_frames = 0
        # initialize before the loop so an empty queue doesn't leave it unbound
        top_score = 0

        while not self.detected_objects_queue.empty():
            (
                camera_name,
                frame_time,
                current_tracked_objects,
                motion_boxes,
                regions,
            ) = self.detected_objects_queue.get()

            if debug_path:
                self.save_debug_frame(
                    debug_path, frame_time, current_tracked_objects.values()
                )

            self.camera_state.update(
                frame_time, current_tracked_objects, motion_boxes, regions
            )
            total_regions += len(regions)
            total_motion_boxes += len(motion_boxes)
            for id, obj in self.camera_state.tracked_objects.items():
                if not obj.false_positive:
                    object_ids.add(id)
                    if obj.top_score > top_score:
                        top_score = obj.top_score

            total_frames += 1

            self.frame_manager.delete(self.camera_state.previous_frame_id)

        return {
            "total_regions": total_regions,
            "total_motion_boxes": total_motion_boxes,
            "true_positive_objects": len(object_ids),
            "total_frames": total_frames,
            "top_score": top_score,
        }
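
    # Write a BGR copy of the frame with each tracked object's bounding box
    # and detection region drawn on it for visual debugging.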
    def save_debug_frame(self, debug_path, frame_time, tracked_objects):
        current_frame = cv2.cvtColor(
            self.frame_manager.get(
                f"{self.camera_name}{frame_time}", self.camera_config.frame_shape_yuv
            ),
            cv2.COLOR_YUV2BGR_I420,
        )
        # draw the bounding boxes on the frame
        for obj in tracked_objects:
            thickness = 2
            color = (0, 0, 175)

            if obj["frame_time"] != frame_time:
                thickness = 1
                color = (255, 0, 0)
            else:
                color = (255, 255, 0)

            box = obj["box"]
            draw_box_with_label(
                current_frame,
                box[0],
                box[1],
                box[2],
                box[3],
                obj["id"],
                f"{int(obj['score']*100)}% {int(obj['area'])}",
                thickness=thickness,
                color=color,
            )
            # draw the region on the frame
            region = obj["region"]
            draw_box_with_label(
                current_frame,
                region[0],
                region[1],
                region[2],
                region[3],
                "region",
                "",
                thickness=1,
                color=(0, 255, 0),
            )

        cv2.imwrite(
            f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg",
            current_frame,
        )
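

# Example invocation (paths are illustrative; the Coral USB detector and
# /labelmap.txt referenced in the config below must be available):
#   python3 process_clip.py -p /path/to/clips -l person -o results.csv --debug-path /tmp/debug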
@click.command()
@click.option("-p", "--path", required=True, help="Path to clip or directory to test.")
@click.option("-l", "--label", default="person", help="Label name to detect.")
@click.option("-o", "--output", default=None, help="File to save csv of data")
@click.option("--debug-path", default=None, help="Path to output frames for debugging.")
def process(path, label, output, debug_path):
    clips = []
    if os.path.isdir(path):
        files = os.listdir(path)
        files.sort()
        clips = [os.path.join(path, file) for file in files]
    elif os.path.isfile(path):
        clips.append(path)

    json_config = {
        "mqtt": {"host": "mqtt"},
        "detectors": {"coral": {"type": "edgetpu", "device": "usb"}},
        "cameras": {
            "camera": {
                "ffmpeg": {
                    "inputs": [
                        {
                            "path": "path.mp4",
                            "global_args": "-hide_banner",
                            "input_args": "-loglevel info",
                            "roles": ["detect"],
                        }
                    ]
                },
                "rtmp": {"enabled": False},
                "record": {"enabled": False},
            }
        },
    }
    object_detector = LocalObjectDetector(labels="/labelmap.txt")

    results = []
    for c in clips:
        logger.info(c)
        frame_shape = get_frame_shape(c)

        # override the detect resolution and input path for this clip
        json_config["cameras"]["camera"]["detect"] = {
            "height": frame_shape[0],
            "width": frame_shape[1],
        }
        json_config["cameras"]["camera"]["ffmpeg"]["inputs"][0]["path"] = c

        frigate_config = FrigateConfig(**json_config)
        runtime_config = frigate_config.runtime_config
        runtime_config.cameras["camera"].create_ffmpeg_cmds()

        process_clip = ProcessClip(c, frame_shape, runtime_config)
        process_clip.load_frames()
        process_clip.process_frames(object_detector, objects_to_track=[label])

        results.append((c, process_clip.stats(debug_path)))

    # avoid a division by zero when no clips were found
    if not results:
        print("No clips found to process.")
        return

    positive_count = sum(
        1 for result in results if result[1]["true_positive_objects"] > 0
    )
    print(
        f"Objects were detected in {positive_count}/{len(results)} ({positive_count/len(results)*100:.2f}%) clip(s)."
    )
    if output:
        # write a header row built from the stats keys, then one row per clip
        with open(output, "w", newline="") as data_file:
            csv_writer = csv.writer(data_file)
            csv_writer.writerow(["file"] + list(results[0][1].keys()))
            for result in results:
                csv_writer.writerow([result[0]] + list(result[1].values()))


if __name__ == "__main__":
    process()