@@ -190,11 +190,14 @@ def main():
     regions = []
     for region_string in REGIONS.split(':'):
         region_parts = region_string.split(',')
+        region_mask_image = cv2.imread("/config/{}".format(region_parts[4]), cv2.IMREAD_GRAYSCALE)
+        region_mask = np.where(region_mask_image==[0])
         regions.append({
             'size': int(region_parts[0]),
             'x_offset': int(region_parts[1]),
             'y_offset': int(region_parts[2]),
             'min_object_size': int(region_parts[3]),
+            'mask': region_mask,
             # Event for motion detection signaling
             'motion_detected': mp.Event(),
             # create shared array for storing 10 detected objects
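
Note: this parsing implies REGIONS is a colon-delimited list of comma-delimited region definitions, with the new fifth field naming a mask file under /config (e.g. "300,0,0,700,back-mask.bmp:300,350,0,700,side-mask.bmp" -- values invented for illustration). A minimal sketch of what the mask lookup produces, building the mask in memory instead of reading a file:

import numpy as np

# white (255) everywhere, with a black (0) rectangle marking an excluded area
mask_image = np.full((300, 300), 255, dtype=np.uint8)
mask_image[0:50, 0:100] = 0

# np.where on the equality yields a (rows, cols) index tuple of the black
# pixels, which is what gets stored under the region's 'mask' key above
mask = np.where(mask_image == 0)
print(len(mask[0]))  # 5000 pixels excluded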
@@ -259,7 +262,7 @@ def main():
             motion_changed,
             frame_shape,
             region['size'], region['x_offset'], region['y_offset'],
-            region['min_object_size'],
+            region['min_object_size'], region['mask'],
             True))
         motion_process.daemon = True
         motion_processes.append(motion_process)
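
Note: the mask is the index tuple returned by np.where, a plain tuple of arrays, so it can be handed to mp.Process like any other argument; each worker ends up with its own copy. A minimal sketch of that round trip (names invented for illustration):

import multiprocessing as mp
import numpy as np

def worker(mask):
    # the index tuple arrives in the child as an ordinary copied object
    frame = np.zeros((300, 300), dtype=np.uint8)
    frame[mask] = 255
    print(frame.sum())  # 76500 = 300 masked pixels * 255

if __name__ == '__main__':
    mask = np.where(np.eye(300, dtype=np.uint8) == 1)
    p = mp.Process(target=worker, args=(mask,))
    p.start()
    p.join()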
@@ -426,22 +429,16 @@ def process_frames(shared_arr, shared_output_arr, shared_frame_time, frame_lock,

 # do the actual motion detection
 def detect_motion(shared_arr, shared_frame_time, frame_lock, frame_ready, motion_detected, motion_changed,
-                  frame_shape, region_size, region_x_offset, region_y_offset, min_motion_area, debug):
+                  frame_shape, region_size, region_x_offset, region_y_offset, min_motion_area, mask, debug):
     # shape shared input array into frame for processing
     arr = tonumpyarray(shared_arr).reshape(frame_shape)

     avg_frame = None
-    last_motion = -1
+    avg_delta = None
     frame_time = 0.0
     motion_frames = 0
     while True:
         now = datetime.datetime.now().timestamp()
-        # if it has been long enough since the last motion, clear the flag
-        if last_motion > 0 and (now - last_motion) > 2:
-            last_motion = -1
-            motion_detected.clear()
-            with motion_changed:
-                motion_changed.notify_all()

         with frame_ready:
             # if there isn't a frame ready for processing or it is old, wait for a signal
@@ -455,6 +452,10 @@ def detect_motion(shared_arr, shared_frame_time, frame_lock, frame_ready, motion

         # convert to grayscale
         gray = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2GRAY)
+
+        # apply image mask to remove areas from motion detection
+        gray[mask] = [255]
+
         # apply gaussian blur
         gray = cv2.GaussianBlur(gray, (21, 21), 0)

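Note: because the masked pixels are painted the constant value 255 on every frame, the running average converges to 255 in those areas too, the frame delta stays near zero there, and no contours can ever form inside the mask. A standalone sketch of the indexing (frame contents invented for illustration):

import numpy as np

gray = np.random.randint(0, 256, (300, 300), dtype=np.uint8)  # stand-in grayscale frame

# suppose the top-left corner holds a timestamp overlay we want ignored
mask_image = np.full((300, 300), 255, dtype=np.uint8)
mask_image[0:30, 0:120] = 0
mask = np.where(mask_image == 0)

gray[mask] = 255  # same assignment as in the diff above
assert (gray[0:30, 0:120] == 255).all()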
@@ -463,15 +464,33 @@ def detect_motion(shared_arr, shared_frame_time, frame_lock, frame_ready, motion
             continue

         # look at the delta from the avg_frame
-        cv2.accumulateWeighted(gray, avg_frame, 0.01)
         frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg_frame))
-        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
+
+        if avg_delta is None:
+            avg_delta = frameDelta.copy().astype("float")
+
+        # compute the average delta over the past few frames
+        # the alpha value can be modified to configure how sensitive the motion detection is:
+        # higher values mean the current frame impacts the delta a lot, and a single raindrop may
+        # put it over the edge; too low, and a fast-moving person won't be detected as motion.
+        # this also assumes that a person is in the same location across more than a single frame
+        cv2.accumulateWeighted(frameDelta, avg_delta, 0.2)
+
+        # compute the threshold image for the current frame
+        current_thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
+
+        # black out everything in the avg_delta where there isn't motion in the current frame
+        avg_delta_image = cv2.convertScaleAbs(avg_delta)
+        avg_delta_image[np.where(current_thresh==[0])] = [0]
+
+        # then look for deltas above the threshold, but only in areas where there is a delta
+        # in the current frame. this prevents deltas from previous frames from being included
+        thresh = cv2.threshold(avg_delta_image, 25, 255, cv2.THRESH_BINARY)[1]

         # dilate the thresholded image to fill in holes, then find contours
         # on thresholded image
         thresh = cv2.dilate(thresh, None, iterations=2)
-        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
-            cv2.CHAIN_APPROX_SIMPLE)
+        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         cnts = imutils.grab_contours(cnts)

         # if there are no contours, there is no motion
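
Note: cv2.accumulateWeighted applies the per-pixel recurrence dst = (1 - alpha) * dst + alpha * src, i.e. an exponential moving average. A plain-numpy sketch of why alpha = 0.2 with a threshold of 25 rejects one-frame blips but passes persistent deltas (pixel values invented for illustration):

import numpy as np

def accumulate_weighted(src, dst, alpha=0.2):
    # same recurrence cv2.accumulateWeighted applies per pixel
    return (1.0 - alpha) * dst + alpha * src

avg_delta = np.array([0.0])

# frame 1: a delta of 100 appears (e.g. a raindrop) -- the average only
# reaches 20, under the threshold of 25, so a single-frame blip is ignored
avg_delta = accumulate_weighted(np.array([100.0]), avg_delta)
print(avg_delta)  # [20.]

# frame 2: the delta persists -- 0.8 * 20 + 0.2 * 100 = 36, over the
# threshold, so motion that holds across frames does get through
avg_delta = accumulate_weighted(np.array([100.0]), avg_delta)
print(avg_delta)  # [36.]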
@@ -499,15 +518,22 @@ def detect_motion(shared_arr, shared_frame_time, frame_lock, frame_ready, motion
             motion_frames += 1
             # if there have been enough consecutive motion frames, report motion
             if motion_frames >= 3:
+                # only average in the current frame if the difference persists for at least 3 frames
+                cv2.accumulateWeighted(gray, avg_frame, 0.01)
                 motion_detected.set()
                 with motion_changed:
                     motion_changed.notify_all()
-                last_motion = now
         else:
+            # when no motion, just keep averaging the frames together
+            cv2.accumulateWeighted(gray, avg_frame, 0.01)
            motion_frames = 0
+            motion_detected.clear()
+            with motion_changed:
+                motion_changed.notify_all()

         if debug and motion_frames >= 3:
             cv2.imwrite("/lab/debug/motion-{}-{}-{}.jpg".format(region_x_offset, region_y_offset, datetime.datetime.now().timestamp()), cropped_frame)
+            cv2.imwrite("/lab/debug/avg_delta-{}-{}-{}.jpg".format(region_x_offset, region_y_offset, datetime.datetime.now().timestamp()), avg_delta_image)

 if __name__ == '__main__':
     mp.freeze_support()
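
Note: the motion_frames counter amounts to a debounce: the shared Event is only set after three consecutive motion frames, cleared on the first still frame, and every state change is broadcast through the Condition. A minimal sketch of that signaling pattern in isolation (frame flags invented for illustration):

import multiprocessing as mp

motion_detected = mp.Event()
motion_changed = mp.Condition()
motion_frames = 0

def report(has_motion):
    # require 3 consecutive motion frames before setting the event,
    # and clear it as soon as a frame without motion arrives
    global motion_frames
    if has_motion:
        motion_frames += 1
        if motion_frames >= 3:
            motion_detected.set()
            with motion_changed:
                motion_changed.notify_all()
    else:
        motion_frames = 0
        motion_detected.clear()
        with motion_changed:
            motion_changed.notify_all()

for flag in [True, True, False, True, True, True]:
    report(flag)
    print(flag, motion_detected.is_set())
# the event only reads True once three Trues arrive in a row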