motion.py

import cv2
import imutils
import numpy as np

from frigate.config import MotionConfig


class MotionDetector:
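    """Frame-differencing motion detector.

    Maintains a running average of downscaled grayscale frames and reports
    bounding boxes (scaled back to full-frame coordinates) wherever the
    current frame differs from that average by more than the configured
    threshold.
    """
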
    def __init__(self, frame_shape, config: MotionConfig):
        self.config = config
        self.frame_shape = frame_shape
        self.resize_factor = frame_shape[0] / config.frame_height
        self.motion_frame_size = (
            config.frame_height,
            config.frame_height * frame_shape[1] // frame_shape[0],
        )
        # np.float was removed from NumPy; the builtin float keeps the original float64 dtype
        self.avg_frame = np.zeros(self.motion_frame_size, float)
        self.avg_delta = np.zeros(self.motion_frame_size, float)
        self.motion_frame_count = 0
        self.frame_counter = 0
        resized_mask = cv2.resize(
            config.mask,
            dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
            interpolation=cv2.INTER_LINEAR,
        )
        self.mask = np.where(resized_mask == [0])
        self.save_images = False

    def detect(self, frame):
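        """Return motion bounding boxes as (x1, y1, x2, y2) in full-frame coordinates."""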
        motion_boxes = []

        gray = frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]]

        # resize frame
        resized_frame = cv2.resize(
            gray,
            dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
            interpolation=cv2.INTER_LINEAR,
        )

        # improve contrast
        if self.config.improve_contrast:
            minval = np.percentile(resized_frame, 4)
            maxval = np.percentile(resized_frame, 96)
            # don't adjust if the image is a single color
            if minval < maxval:
                resized_frame = np.clip(resized_frame, minval, maxval)
                resized_frame = (
                    ((resized_frame - minval) / (maxval - minval)) * 255
                ).astype(np.uint8)

        # mask frame
        resized_frame[self.mask] = [255]
        # it takes ~30 frames to establish a baseline,
        # so don't bother looking for motion until then
        if self.frame_counter < 30:
            self.frame_counter += 1
        else:
            if self.save_images:
                self.frame_counter += 1
            # compare to average
            frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))

            # compute the average delta over the past few frames
            # higher values mean the current frame impacts the delta a lot, and a single raindrop
            # may register as motion; too low and a fast-moving person won't be detected as motion
            cv2.accumulateWeighted(frameDelta, self.avg_delta, self.config.delta_alpha)

            # compute the threshold image for the current frame
            current_thresh = cv2.threshold(
                frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY
            )[1]

            # black out everything in the avg_delta where there isn't motion in the current frame
            avg_delta_image = cv2.convertScaleAbs(self.avg_delta)
            avg_delta_image = cv2.bitwise_and(avg_delta_image, current_thresh)

            # then look for deltas above the threshold, but only in areas where there is a delta
            # in the current frame. this prevents deltas from previous frames from being included
            thresh = cv2.threshold(
                avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY
            )[1]

            # dilate the thresholded image to fill in holes, then find contours
            # on the thresholded image
            thresh_dilated = cv2.dilate(thresh, None, iterations=2)
            cnts = cv2.findContours(
                thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
            )
            cnts = imutils.grab_contours(cnts)
            # loop over the contours
            for c in cnts:
                # if the contour is big enough, count it as motion
                contour_area = cv2.contourArea(c)
                if contour_area > self.config.contour_area:
                    x, y, w, h = cv2.boundingRect(c)
                    motion_boxes.append(
                        (
                            int(x * self.resize_factor),
                            int(y * self.resize_factor),
                            int((x + w) * self.resize_factor),
                            int((y + h) * self.resize_factor),
                        )
                    )
            if self.save_images:
                # draw the qualifying contours and write out a debug mosaic
                thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR)
                for c in cnts:
                    contour_area = cv2.contourArea(c)
                    if contour_area > self.config.contour_area:
                        x, y, w, h = cv2.boundingRect(c)
                        cv2.rectangle(
                            thresh_dilated,
                            (x, y),
                            (x + w, y + h),
                            (0, 0, 255),
                            2,
                        )
                image_row_1 = cv2.hconcat(
                    [
                        cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
                        cv2.cvtColor(avg_delta_image, cv2.COLOR_GRAY2BGR),
                    ]
                )
                image_row_2 = cv2.hconcat(
                    [cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR), thresh_dilated]
                )
                combined_image = cv2.vconcat([image_row_1, image_row_2])
                cv2.imwrite(f"motion/motion-{self.frame_counter}.jpg", combined_image)
        if len(motion_boxes) > 0:
            self.motion_frame_count += 1
            if self.motion_frame_count >= 10:
                # only average in the current frame if the difference persists for a bit
                cv2.accumulateWeighted(
                    resized_frame, self.avg_frame, self.config.frame_alpha
                )
        else:
            # when no motion, just keep averaging the frames together
            cv2.accumulateWeighted(
                resized_frame, self.avg_frame, self.config.frame_alpha
            )
            self.motion_frame_count = 0

        return motion_boxes
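

# --- Usage sketch (not part of the upstream module) ---
# A minimal, hypothetical example of driving the detector with synthetic frames.
# The real settings come from frigate.config.MotionConfig; the SimpleNamespace
# below is only a stand-in carrying the fields detect() reads, with guessed values.
if __name__ == "__main__":
    from types import SimpleNamespace

    frame_shape = (360, 640)
    stub_config = SimpleNamespace(
        frame_height=180,
        mask=np.full(frame_shape, 255, np.uint8),  # 255 everywhere = nothing masked out
        improve_contrast=False,
        delta_alpha=0.2,
        threshold=25,
        contour_area=30,
        frame_alpha=0.2,
    )
    detector = MotionDetector(frame_shape, stub_config)

    # feed static frames to establish the ~30-frame baseline
    static = np.zeros(frame_shape, np.uint8)
    for _ in range(35):
        detector.detect(static)

    # then a frame with a bright square should come back as one motion box
    moving = static.copy()
    moving[100:160, 200:260] = 255
    print(detector.detect(moving))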