start_no_thread.py

import datetime
import time
import threading
import queue
import itertools
from collections import defaultdict

import cv2
import imutils
import numpy as np
from scipy.spatial import distance as dist
import tflite_runtime.interpreter as tflite
from tflite_runtime.interpreter import load_delegate


def load_labels(path, encoding='utf-8'):
    """Loads labels from a file (with or without index numbers).

    Args:
        path: path to label file.
        encoding: label file encoding.
    Returns:
        Dictionary mapping indices to labels.
    """
    with open(path, 'r', encoding=encoding) as f:
        lines = f.readlines()
        if not lines:
            return {}

        if lines[0].split(' ', maxsplit=1)[0].isdigit():
            pairs = [line.split(' ', maxsplit=1) for line in lines]
            return {int(index): label.strip() for index, label in pairs}
        else:
            return {index: line.strip() for index, line in enumerate(lines)}
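
# Note: the label file may either prefix each label with its index
# ("0 person") or contain one bare label per line, in which case the
# line number is used as the index.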


def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thickness=2, color=None, position='ul'):
    if color is None:
        color = (0, 0, 255)
    display_text = "{}: {}".format(label, info)
    cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, thickness)
    font_scale = 0.5
    font = cv2.FONT_HERSHEY_SIMPLEX
    # get the width and height of the text box
    size = cv2.getTextSize(display_text, font, fontScale=font_scale, thickness=2)
    text_width = size[0][0]
    text_height = size[0][1]
    line_height = text_height + size[1]
    # set the text start position
    if position == 'ul':
        text_offset_x = x_min
        text_offset_y = 0 if y_min < line_height else y_min - (line_height + 8)
    elif position == 'ur':
        text_offset_x = x_max - (text_width + 8)
        text_offset_y = 0 if y_min < line_height else y_min - (line_height + 8)
    elif position == 'bl':
        text_offset_x = x_min
        text_offset_y = y_max
    elif position == 'br':
        text_offset_x = x_max - (text_width + 8)
        text_offset_y = y_max
    # make the coords of the text box with a small padding of two pixels
    textbox_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y + line_height))
    cv2.rectangle(frame, textbox_coords[0], textbox_coords[1], color, cv2.FILLED)
    cv2.putText(frame, display_text, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)


def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2):
    # size is the longest edge of the box, multiplied
    size = int(max(xmax - xmin, ymax - ymin) * multiplier)
    # if the size is too big to fit in the frame, clamp it
    if size > min(frame_shape[0], frame_shape[1]):
        size = min(frame_shape[0], frame_shape[1])

    # x_offset is the midpoint of the bounding box minus half the size
    x_offset = int((xmax - xmin) / 2.0 + xmin - size / 2.0)
    # if outside the image
    if x_offset < 0:
        x_offset = 0
    elif x_offset > (frame_shape[1] - size):
        x_offset = (frame_shape[1] - size)

    # y_offset is the midpoint of the bounding box minus half the size
    y_offset = int((ymax - ymin) / 2.0 + ymin - size / 2.0)
    # if outside the image
    if y_offset < 0:
        y_offset = 0
    elif y_offset > (frame_shape[0] - size):
        y_offset = (frame_shape[0] - size)

    return (x_offset, y_offset, x_offset + size, y_offset + size)
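
# Example: a 50x20 box at (100, 100) with the default multiplier yields a
# 100x100 square region centered on the box, shifted as needed to stay
# inside the frame.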


def intersection(box_a, box_b):
    return (
        max(box_a[0], box_b[0]),
        max(box_a[1], box_b[1]),
        min(box_a[2], box_b[2]),
        min(box_a[3], box_b[3])
    )


def area(box):
    return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)


def intersection_over_union(box_a, box_b):
    # determine the (x, y)-coordinates of the intersection rectangle
    intersect = intersection(box_a, box_b)

    # compute the area of the intersection rectangle
    inter_area = max(0, intersect[2] - intersect[0] + 1) * max(0, intersect[3] - intersect[1] + 1)

    if inter_area == 0:
        return 0.0

    # compute the area of both the prediction and ground-truth
    # rectangles
    box_a_area = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
    box_b_area = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)

    # compute the intersection over union by taking the intersection
    # area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
    iou = inter_area / float(box_a_area + box_b_area - inter_area)

    # return the intersection over union value
    return iou
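
# Example: for box_a = (0, 0, 9, 9) and box_b = (5, 5, 14, 14), the
# intersection is (5, 5, 9, 9) with a pixel area of 25 (the +1 makes the
# coordinates inclusive), each box has an area of 100, so
# IoU = 25 / (100 + 100 - 25) ~= 0.14.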


def clipped(obj, frame_shape):
    # if the object is within 5 pixels of the region border, and the region
    # is not on the edge of the frame, consider the object to be clipped
    box = obj[2]
    region = obj[3]
    if ((region[0] > 5 and box[0] - region[0] <= 5) or
            (region[1] > 5 and box[1] - region[1] <= 5) or
            (frame_shape[1] - region[2] > 5 and region[2] - box[2] <= 5) or
            (frame_shape[0] - region[3] > 5 and region[3] - box[3] <= 5)):
        return True
    else:
        return False


def filtered(obj):
    # only keep 'person' detections for now
    if obj[0] != 'person':
        return True
    return False


def create_tensor_input(frame, region):
    cropped_frame = frame[region[1]:region[3], region[0]:region[2]]

    # Resize to 300x300 if needed
    if cropped_frame.shape != (300, 300, 3):
        # TODO: use Pillow-SIMD?
        cropped_frame = cv2.resize(cropped_frame, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)

    # Expand dimensions since the model expects images to have shape: [1, 300, 300, 3]
    return np.expand_dims(cropped_frame, axis=0)
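
# Note: the result is a (1, 300, 300, 3) uint8 tensor, matching the input
# signature of the quantized SSD model loaded in main() below.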


class MotionDetector():
    # TODO: add motion masking
    def __init__(self, frame_shape, resize_factor=4):
        self.resize_factor = resize_factor
        self.motion_frame_size = (int(frame_shape[0] / resize_factor), int(frame_shape[1] / resize_factor))
        # float64 accumulators (cv2.accumulateWeighted requires a float destination)
        self.avg_frame = np.zeros(self.motion_frame_size, np.float64)
        self.avg_delta = np.zeros(self.motion_frame_size, np.float64)
        self.motion_frame_count = 0
        self.frame_counter = 0

    def detect(self, frame):
        motion_boxes = []

        # resize frame
        resized_frame = cv2.resize(frame, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)

        # convert to grayscale
        gray = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2GRAY)

        # it takes ~30 frames to establish a baseline,
        # so dont bother looking for motion until then
        if self.frame_counter < 30:
            self.frame_counter += 1
        else:
            # compare to the average
            frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg_frame))

            # compute the average delta over the past few frames.
            # the alpha value can be modified to configure how sensitive the motion detection is:
            # with a higher value, the current frame impacts the delta a lot, and a single raindrop
            # may register as motion; too low, and a fast moving person wont be detected as motion.
            # this also assumes that a person is in the same location across more than a single frame
            cv2.accumulateWeighted(frameDelta, self.avg_delta, 0.2)

            # compute the threshold image for the current frame
            current_thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

            # black out everything in the avg_delta where there isnt motion in the current frame
            avg_delta_image = cv2.convertScaleAbs(self.avg_delta)
            avg_delta_image[np.where(current_thresh == [0])] = [0]

            # then look for deltas above the threshold, but only in areas where there is a delta
            # in the current frame. this prevents deltas from previous frames from being included
            thresh = cv2.threshold(avg_delta_image, 25, 255, cv2.THRESH_BINARY)[1]

            # dilate the thresholded image to fill in holes, then find contours
            # on the thresholded image
            thresh = cv2.dilate(thresh, None, iterations=2)
            cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)

            # loop over the contours
            for c in cnts:
                # if the contour is big enough, count it as motion
                contour_area = cv2.contourArea(c)
                if contour_area > 100:
                    # cv2.drawContours(resized_frame, [c], -1, (255,255,255), 2)
                    x, y, w, h = cv2.boundingRect(c)
                    motion_boxes.append((x * self.resize_factor, y * self.resize_factor, (x + w) * self.resize_factor, (y + h) * self.resize_factor))

        if len(motion_boxes) > 0:
            self.motion_frame_count += 1
            # TODO: this really depends on FPS
            if self.motion_frame_count >= 10:
                # only average in the current frame if the difference persists for at least 10 frames
                cv2.accumulateWeighted(gray, self.avg_frame, 0.2)
        else:
            # when there is no motion, just keep averaging the frames together
            cv2.accumulateWeighted(gray, self.avg_frame, 0.2)
            self.motion_frame_count = 0

        return motion_boxes
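
# Usage sketch: one MotionDetector per stream; feed it every decoded frame
# and it returns motion bounding boxes scaled back to full-frame coordinates:
#   motion_detector = MotionDetector(frame_shape, resize_factor=4)
#   motion_boxes = motion_detector.detect(frame)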


class ObjectDetector():
    def __init__(self, model_file, label_file):
        self.labels = load_labels(label_file)

        edge_tpu_delegate = None
        try:
            edge_tpu_delegate = load_delegate('libedgetpu.so.1.0')
        except ValueError:
            print("No EdgeTPU detected. Falling back to CPU.")

        if edge_tpu_delegate is None:
            self.interpreter = tflite.Interpreter(
                model_path=model_file)
        else:
            self.interpreter = tflite.Interpreter(
                model_path=model_file,
                experimental_delegates=[edge_tpu_delegate])

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()

    def detect(self, tensor_input, threshold=.4):
        self.interpreter.set_tensor(self.tensor_input_details[0]['index'], tensor_input)
        self.interpreter.invoke()
        boxes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[0]['index']))
        label_codes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[1]['index']))
        scores = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[2]['index']))

        detections = []
        for i, score in enumerate(scores):
            # scores are sorted in descending order, so stop at the first one
            # below the threshold (this also avoids looking up labels for
            # meaningless low-score rows)
            if score < threshold:
                break
            detections.append((
                self.labels[int(label_codes[i])],
                float(score),
                boxes[i]
            ))

        return detections
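
# Note: the SSD model outputs boxes as normalized (ymin, xmin, ymax, xmax)
# coordinates relative to the model input, which is why callers below index
# box[1] and box[0] to recover x_min and y_min when mapping back to the frame.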


class ObjectTracker():
    def __init__(self, max_disappeared):
        self.tracked_objects = {}
        self.disappeared = {}
        self.max_disappeared = max_disappeared

    def register(self, index, frame_time, obj):
        id = f"{frame_time}-{index}"
        obj['id'] = id
        obj['frame_time'] = frame_time
        obj['top_score'] = obj['score']
        self.add_history(obj)
        self.tracked_objects[id] = obj
        self.disappeared[id] = 0

    def deregister(self, id):
        del self.tracked_objects[id]
        del self.disappeared[id]

    def update(self, id, new_obj):
        self.disappeared[id] = 0
        self.tracked_objects[id].update(new_obj)
        self.add_history(self.tracked_objects[id])
        if self.tracked_objects[id]['score'] > self.tracked_objects[id]['top_score']:
            self.tracked_objects[id]['top_score'] = self.tracked_objects[id]['score']

    def add_history(self, obj):
        entry = {
            'score': obj['score'],
            'box': obj['box'],
            'region': obj['region'],
            'centroid': obj['centroid'],
            'frame_time': obj['frame_time']
        }
        if 'history' in obj:
            obj['history'].append(entry)
        else:
            obj['history'] = [entry]

    def match_and_update(self, frame_time, new_objects):
        if len(new_objects) == 0:
            for id in list(self.tracked_objects.keys()):
                if self.disappeared[id] >= self.max_disappeared:
                    self.deregister(id)
                else:
                    self.disappeared[id] += 1
            return

        # group by name
        new_object_groups = defaultdict(lambda: [])
        for obj in new_objects:
            new_object_groups[obj[0]].append({
                'label': obj[0],
                'score': obj[1],
                'box': obj[2],
                'region': obj[3]
            })

        # track objects for each label type
        for label, group in new_object_groups.items():
            current_objects = [o for o in self.tracked_objects.values() if o['label'] == label]
            current_ids = [o['id'] for o in current_objects]
            current_centroids = np.array([o['centroid'] for o in current_objects])

            # compute centroids of new objects
            for obj in group:
                centroid_x = int((obj['box'][0] + obj['box'][2]) / 2.0)
                centroid_y = int((obj['box'][1] + obj['box'][3]) / 2.0)
                obj['centroid'] = (centroid_x, centroid_y)

            if len(current_objects) == 0:
                for index, obj in enumerate(group):
                    self.register(index, frame_time, obj)
                # move on to the next label group
                continue

            new_centroids = np.array([o['centroid'] for o in group])

            # compute the distance between each pair of tracked
            # centroids and new centroids, respectively -- our
            # goal will be to match each new centroid to an existing
            # object centroid
            D = dist.cdist(current_centroids, new_centroids)

            # in order to perform this matching we must (1) find the
            # smallest value in each row and then (2) sort the row
            # indexes based on their minimum values so that the row
            # with the smallest value is at the *front* of the index
            # list
            rows = D.min(axis=1).argsort()

            # next, we perform a similar process on the columns by
            # finding the smallest value in each column and then
            # sorting using the previously computed row index list
            cols = D.argmin(axis=1)[rows]

            # in order to determine if we need to update, register,
            # or deregister an object we need to keep track of which
            # of the row and column indexes we have already examined
            usedRows = set()
            usedCols = set()

            # loop over the combination of the (row, column) index
            # tuples
            for (row, col) in zip(rows, cols):
                # if we have already examined either the row or
                # column value before, ignore it
                if row in usedRows or col in usedCols:
                    continue

                # otherwise, grab the object ID for the current row,
                # set its new centroid, and reset the disappeared
                # counter
                objectID = current_ids[row]
                self.update(objectID, group[col])

                # indicate that we have examined each of the row and
                # column indexes, respectively
                usedRows.add(row)
                usedCols.add(col)

            # compute the row and column indexes we have NOT yet examined
            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
            unusedCols = set(range(0, D.shape[1])).difference(usedCols)

            # in the event that the number of object centroids is
            # equal to or greater than the number of input centroids
            # we need to check and see if some of these objects have
            # potentially disappeared
            if D.shape[0] >= D.shape[1]:
                for row in unusedRows:
                    id = current_ids[row]
                    if self.disappeared[id] >= self.max_disappeared:
                        self.deregister(id)
                    else:
                        self.disappeared[id] += 1
            # if the number of input centroids is greater
            # than the number of existing object centroids, we need to
            # register each new input centroid as a trackable object
            else:
                for col in unusedCols:
                    self.register(col, frame_time, group[col])
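
# Greedy centroid matching: each new detection is assigned to the nearest
# tracked centroid of the same label, and a tracked object is deregistered
# only after max_disappeared consecutive frames without a match.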


def main():
    frames = 0
    # frame_queue = queue.Queue(maxsize=5)
    # frame_cache = {}
    # frame_shape = (1080, 1920, 3)
    frame_shape = (720, 1280, 3)
    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
    frame = np.zeros(frame_shape, np.uint8)

    motion_detector = MotionDetector(frame_shape, resize_factor=4)
    object_detector = ObjectDetector('/lab/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite', '/lab/labelmap.txt')
    # object_detector = ObjectDetector('/lab/detect.tflite', '/lab/labelmap.txt')
    object_tracker = ObjectTracker(10)

    # f = open('/debug/input/back.rgb24', 'rb')
    f = open('/debug/back.raw_video', 'rb')
    # f = open('/debug/ali-jake.raw_video', 'rb')
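    # the file is assumed to contain raw 24-bit BGR frames at frame_shape,
    # e.g. extracted with something like:
    #   ffmpeg -i back.mp4 -f rawvideo -pix_fmt bgr24 back.raw_video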

    total_detections = 0
    start = datetime.datetime.now().timestamp()
    while True:
        frame_detections = 0
        frame_bytes = f.read(frame_size)
        if not frame_bytes:
            break
        frame_time = datetime.datetime.now().timestamp()

        # store the frame in a numpy array
        frame[:] = (np
                    .frombuffer(frame_bytes, np.uint8)
                    .reshape(frame_shape))

        frames += 1

        # look for motion
        motion_boxes = motion_detector.detect(frame)

        tracked_objects = object_tracker.tracked_objects.values()

        # merge areas of motion that intersect with a known tracked object into a single area to look at
        areas_of_interest = []
        used_motion_boxes = []
        for obj in tracked_objects:
            x_min, y_min, x_max, y_max = obj['box']
            for m_index, motion_box in enumerate(motion_boxes):
                if area(intersection(obj['box'], motion_box)) / area(motion_box) > .5:
                    used_motion_boxes.append(m_index)
                    x_min = min(obj['box'][0], motion_box[0])
                    y_min = min(obj['box'][1], motion_box[1])
                    x_max = max(obj['box'][2], motion_box[2])
                    y_max = max(obj['box'][3], motion_box[3])
            areas_of_interest.append((x_min, y_min, x_max, y_max))
        unused_motion_boxes = set(range(0, len(motion_boxes))).difference(used_motion_boxes)

        # compute motion regions
        motion_regions = [calculate_region(frame_shape, motion_boxes[i][0], motion_boxes[i][1], motion_boxes[i][2], motion_boxes[i][3], 1.2)
                          for i in unused_motion_boxes]

        # compute tracked object regions
        object_regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
                          for a in areas_of_interest]

        # merge regions with high IOU
        merged_regions = motion_regions + object_regions
        while True:
            max_iou = 0.0
            max_indices = None
            region_indices = range(len(merged_regions))
            for a, b in itertools.combinations(region_indices, 2):
                iou = intersection_over_union(merged_regions[a], merged_regions[b])
                if iou > max_iou:
                    max_iou = iou
                    max_indices = (a, b)
            if max_iou > 0.1:
                a = merged_regions[max_indices[0]]
                b = merged_regions[max_indices[1]]
                merged_regions.append(calculate_region(frame_shape,
                                                       min(a[0], b[0]),
                                                       min(a[1], b[1]),
                                                       max(a[2], b[2]),
                                                       max(a[3], b[3]),
                                                       1))
                del merged_regions[max(max_indices[0], max_indices[1])]
                del merged_regions[min(max_indices[0], max_indices[1])]
            else:
                break
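        # the loop above repeatedly merges the pair of regions with the
        # highest IOU until no pair overlaps by more than 10%, so each area
        # of the frame is only sent to the detector once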

        # resize regions and detect
        detections = []
        for region in merged_regions:
            tensor_input = create_tensor_input(frame, region)
            region_detections = object_detector.detect(tensor_input)
            frame_detections += 1
            for d in region_detections:
                if filtered(d):
                    continue
                box = d[2]
                # regions are squares, so a single size maps normalized
                # coordinates back to the frame on both axes
                size = region[2] - region[0]
                x_min = int((box[1] * size) + region[0])
                y_min = int((box[0] * size) + region[1])
                x_max = int((box[3] * size) + region[0])
                y_max = int((box[2] * size) + region[1])
                detections.append((
                    d[0],
                    d[1],
                    (x_min, y_min, x_max, y_max),
                    region))

        #########
        # merge objects, check for clipped objects and look again up to 4 times
        #########
        refining = True
        refine_count = 0
        while refining and refine_count < 4:
            refining = False

            # group by name
            detected_object_groups = defaultdict(lambda: [])
            for detection in detections:
                detected_object_groups[detection[0]].append(detection)

            selected_objects = []
            for group in detected_object_groups.values():
                # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                boxes = [(o[2][0], o[2][1], o[2][2] - o[2][0], o[2][3] - o[2][1])
                         for o in group]
                confidences = [o[1] for o in group]
                idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

                # NMSBoxes returns an Nx1 array on older OpenCV releases and a
                # flat array on newer ones, so normalize before indexing
                for index in np.array(idxs).flatten():
                    obj = group[index]
                    if clipped(obj, frame_shape):
                        box = obj[2]
                        # calculate a new region that will hopefully get the entire object
                        region = calculate_region(frame_shape,
                                                  box[0], box[1],
                                                  box[2], box[3])
                        tensor_input = create_tensor_input(frame, region)
                        # run detection on the new region
                        refined_detections = object_detector.detect(tensor_input)
                        frame_detections += 1
                        for d in refined_detections:
                            if filtered(d):
                                continue
                            box = d[2]
                            size = region[2] - region[0]
                            x_min = int((box[1] * size) + region[0])
                            y_min = int((box[0] * size) + region[1])
                            x_max = int((box[3] * size) + region[0])
                            y_max = int((box[2] * size) + region[1])
                            selected_objects.append((
                                d[0],
                                d[1],
                                (x_min, y_min, x_max, y_max),
                                region))
                        refining = True
                    else:
                        selected_objects.append(obj)

            # set the detections list to only include top, complete objects
            # and new detections
            detections = selected_objects

            if refining:
                refine_count += 1

        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, detections)

        total_detections += frame_detections

        # if (frames >= 700 and frames <= 1635) or (frames >= 2500):
        # if (frames >= 700 and frames <= 1000):
        # if (frames >= 0):
        #     # row1 = cv2.hconcat([gray, cv2.convertScaleAbs(avg_frame)])
        #     # row2 = cv2.hconcat([frameDelta, thresh])
        #     # cv2.imwrite(f"/lab/debug/output/{frames}.jpg", cv2.vconcat([row1, row2]))
        #     # # cv2.imwrite(f"/lab/debug/output/resized-frame-{frames}.jpg", resized_frame)
        #     # for region in motion_regions:
        #     #     cv2.rectangle(frame, (region[0], region[1]), (region[2], region[3]), (255,128,0), 2)
        #     # for region in object_regions:
        #     #     cv2.rectangle(frame, (region[0], region[1]), (region[2], region[3]), (0,128,255), 2)
        #     for region in merged_regions:
        #         cv2.rectangle(frame, (region[0], region[1]), (region[2], region[3]), (0,255,0), 2)
        #     for box in motion_boxes:
        #         cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (255,0,0), 2)
        #     for detection in detections:
        #         box = detection[2]
        #         draw_box_with_label(frame, box[0], box[1], box[2], box[3], detection[0], f"{detection[1]*100}%")
        #     for obj in object_tracker.tracked_objects.values():
        #         box = obj['box']
        #         draw_box_with_label(frame, box[0], box[1], box[2], box[3], obj['label'], obj['id'], thickness=1, color=(0,0,255), position='bl')
        #     cv2.putText(frame, str(total_detections), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(0, 0, 0), thickness=2)
        #     cv2.putText(frame, str(frame_detections), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(0, 0, 0), thickness=2)
        #     cv2.imwrite(f"/lab/debug/output/frame-{frames}.jpg", frame)
        # break

    duration = datetime.datetime.now().timestamp() - start
    print(f"Processed {frames} frames in {duration:.2f} seconds ({frames/duration:.2f} FPS).")
    print(f"Total detections: {total_detections}")


if __name__ == '__main__':
    main()