# motion.py
  1. import cv2
  2. import imutils
  3. import numpy as np
  4. from frigate.config import MotionConfig
  5. class MotionDetector:
  6. def __init__(self, frame_shape, config: MotionConfig):
  7. self.config = config
  8. self.frame_shape = frame_shape
  9. self.resize_factor = frame_shape[0] / config.frame_height
  10. self.motion_frame_size = (
  11. config.frame_height,
  12. config.frame_height * frame_shape[1] // frame_shape[0],
  13. )
  14. self.avg_frame = np.zeros(self.motion_frame_size, np.float)
  15. self.avg_delta = np.zeros(self.motion_frame_size, np.float)
  16. self.motion_frame_count = 0
  17. self.frame_counter = 0
  18. resized_mask = cv2.resize(
  19. config.mask,
  20. dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
  21. interpolation=cv2.INTER_LINEAR,
  22. )
  23. self.mask = np.where(resized_mask == [0])
  24. self.save_images = False
  25. def detect(self, frame):
  26. motion_boxes = []
  27. gray = frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]]
  28. # resize frame
  29. resized_frame = cv2.resize(
  30. gray,
  31. dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
  32. interpolation=cv2.INTER_LINEAR,
  33. )
  34. # Improve contrast
  35. minval = np.percentile(resized_frame, 4)
  36. maxval = np.percentile(resized_frame, 96)
  37. resized_frame = np.clip(resized_frame, minval, maxval)
  38. resized_frame = (((resized_frame - minval) / (maxval - minval)) * 255).astype(
  39. np.uint8
  40. )
  41. # mask frame
  42. resized_frame[self.mask] = [255]
  43. # it takes ~30 frames to establish a baseline
  44. # dont bother looking for motion
  45. if self.frame_counter < 30:
  46. self.frame_counter += 1
  47. else:
  48. if self.save_images:
  49. self.frame_counter += 1
  50. # compare to average
  51. frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))
  52. # compute the average delta over the past few frames
  53. # higher values mean the current frame impacts the delta a lot, and a single raindrop may
  54. # register as motion, too low and a fast moving person wont be detected as motion
  55. cv2.accumulateWeighted(frameDelta, self.avg_delta, self.config.delta_alpha)
  56. # compute the threshold image for the current frame
  57. current_thresh = cv2.threshold(
  58. frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY
  59. )[1]
  60. # black out everything in the avg_delta where there isnt motion in the current frame
  61. avg_delta_image = cv2.convertScaleAbs(self.avg_delta)
  62. avg_delta_image = cv2.bitwise_and(avg_delta_image, current_thresh)
  63. # then look for deltas above the threshold, but only in areas where there is a delta
  64. # in the current frame. this prevents deltas from previous frames from being included
  65. thresh = cv2.threshold(
  66. avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY
  67. )[1]
  68. # dilate the thresholded image to fill in holes, then find contours
  69. # on thresholded image
  70. thresh_dilated = cv2.dilate(thresh, None, iterations=2)
  71. cnts = cv2.findContours(
  72. thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
  73. )
  74. cnts = imutils.grab_contours(cnts)
  75. # loop over the contours
  76. for c in cnts:
  77. # if the contour is big enough, count it as motion
  78. contour_area = cv2.contourArea(c)
  79. if contour_area > self.config.contour_area:
  80. x, y, w, h = cv2.boundingRect(c)
  81. motion_boxes.append(
  82. (
  83. int(x * self.resize_factor),
  84. int(y * self.resize_factor),
  85. int((x + w) * self.resize_factor),
  86. int((y + h) * self.resize_factor),
  87. )
  88. )
  89. if self.save_images:
  90. thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR)
  91. # print("--------")
  92. # print(self.frame_counter)
  93. for c in cnts:
  94. contour_area = cv2.contourArea(c)
  95. # print(contour_area)
  96. if contour_area > self.config.contour_area:
  97. x, y, w, h = cv2.boundingRect(c)
  98. cv2.rectangle(
  99. thresh_dilated,
  100. (x, y),
  101. (x + w, y + h),
  102. (0, 0, 255),
  103. 2,
  104. )
  105. # print("--------")
  106. image_row_1 = cv2.hconcat(
  107. [
  108. cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
  109. cv2.cvtColor(avg_delta_image, cv2.COLOR_GRAY2BGR),
  110. ]
  111. )
  112. image_row_2 = cv2.hconcat(
  113. [cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR), thresh_dilated]
  114. )
  115. combined_image = cv2.vconcat([image_row_1, image_row_2])
  116. cv2.imwrite(f"motion/motion-{self.frame_counter}.jpg", combined_image)
  117. if len(motion_boxes) > 0:
  118. self.motion_frame_count += 1
  119. if self.motion_frame_count >= 10:
  120. # only average in the current frame if the difference persists for a bit
  121. cv2.accumulateWeighted(
  122. resized_frame, self.avg_frame, self.config.frame_alpha
  123. )
  124. else:
  125. # when no motion, just keep averaging the frames together
  126. cv2.accumulateWeighted(
  127. resized_frame, self.avg_frame, self.config.frame_alpha
  128. )
  129. self.motion_frame_count = 0
  130. return motion_boxes