# object_processing.py
import copy
import base64
import datetime
import hashlib
import itertools
import json
import logging
import os
import queue
import threading
import time
from collections import Counter, defaultdict
from statistics import mean, median
from typing import Callable, Dict

import cv2
import matplotlib.pyplot as plt
import numpy as np

from frigate.config import FrigateConfig, CameraConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.edgetpu import load_labels
from frigate.util import SharedMemoryFrameManager, draw_box_with_label, calculate_region

logger = logging.getLogger(__name__)

PATH_TO_LABELS = '/labelmap.txt'

LABELS = load_labels(PATH_TO_LABELS)
cmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))

COLOR_MAP = {}
for key, val in LABELS.items():
    COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])

def on_edge(box, frame_shape):
    # returns True if the box touches any edge of the frame
    return (
        box[0] == 0
        or box[1] == 0
        or box[2] == frame_shape[1] - 1
        or box[3] == frame_shape[0] - 1
    )

def is_better_thumbnail(current_thumb, new_obj, frame_shape) -> bool:
    # larger boxes and higher scores make better thumbnails, but objects
    # cut off at the frame edge are less ideal

    # never trade a fully visible thumbnail for one on an edge
    if on_edge(new_obj['box'], frame_shape) and not on_edge(current_thumb['box'], frame_shape):
        return False

    # take the new object if its score is better by more than 5%
    if new_obj['score'] > current_thumb['score'] + .05:
        return True

    # take the new object if its area is more than 10% larger
    if new_obj['area'] > current_thumb['area'] * 1.1:
        return True

    return False
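
# A minimal sketch of how the comparison above plays out (values are
# hypothetical, not taken from a real detection):
#
#     >>> current = {'box': (50, 50, 100, 100), 'score': 0.80, 'area': 2500}
#     >>> new = {'box': (40, 40, 120, 120), 'score': 0.82, 'area': 6400}
#     >>> is_better_thumbnail(current, new, (480, 640))
#     True
#
# the score gain (0.02) is under the 5% margin, but the area grew by more
# than 10%, so the new detection becomes the thumbnail.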

class TrackedObject:
    def __init__(self, camera, camera_config: CameraConfig, frame_cache, obj_data):
        self.obj_data = obj_data
        self.camera = camera
        self.camera_config = camera_config
        self.frame_cache = frame_cache
        self.current_zones = []
        self.entered_zones = set()
        self.false_positive = True
        self.top_score = self.computed_score = 0.0
        self.thumbnail_data = None
        self.last_updated = 0
        self.last_published = 0
        self.frame = None
        self.previous = self.to_dict()

        # start the score history
        self.score_history = [self.obj_data['score']]

    def _is_false_positive(self):
        # once a true positive, always a true positive
        if not self.false_positive:
            return False

        threshold = self.camera_config.objects.filters[self.obj_data['label']].threshold
        if self.computed_score < threshold:
            return True
        return False

    def compute_score(self):
        scores = self.score_history[:]
        # pad with zeros if there are not at least 3 scores
        if len(scores) < 3:
            scores += [0.0] * (3 - len(scores))
        return median(scores)
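
    # Sketch of how the zero padding above affects the median (illustrative):
    #
    #     >>> median([0.9, 0.0, 0.0])   # one real detection so far
    #     0.0
    #     >>> median([0.9, 0.8, 0.0])   # two detections
    #     0.8
    #
    # a brand-new object needs at least two solid scores before its computed
    # score can clear the threshold, which suppresses single-frame false
    # positives.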

    def update(self, current_frame_time, obj_data):
        significant_update = False
        self.obj_data.update(obj_data)
        # if the object is not in the current frame, add a 0.0 to the score history
        if self.obj_data['frame_time'] != current_frame_time:
            self.score_history.append(0.0)
        else:
            self.score_history.append(self.obj_data['score'])
        # only keep the last 10 scores
        if len(self.score_history) > 10:
            self.score_history = self.score_history[-10:]

        # calculate if this is a false positive
        self.computed_score = self.compute_score()
        if self.computed_score > self.top_score:
            self.top_score = self.computed_score
        self.false_positive = self._is_false_positive()

        if not self.false_positive:
            # determine if this frame is a better thumbnail
            if (
                self.thumbnail_data is None
                or is_better_thumbnail(self.thumbnail_data, self.obj_data, self.camera_config.frame_shape)
            ):
                self.thumbnail_data = {
                    'frame_time': self.obj_data['frame_time'],
                    'box': self.obj_data['box'],
                    'area': self.obj_data['area'],
                    'region': self.obj_data['region'],
                    'score': self.obj_data['score']
                }
                significant_update = True

        # check zones
        current_zones = []
        bottom_center = (self.obj_data['centroid'][0], self.obj_data['box'][3])
        # check each zone
        for name, zone in self.camera_config.zones.items():
            contour = zone.contour
            # check if the object's bottom center is inside the zone contour;
            # pointPolygonTest returns a positive value inside the contour,
            # 0 on the edge, and a negative value outside
            if cv2.pointPolygonTest(contour, bottom_center, False) >= 0:
                # if the object passed the zone filters once, don't apply them again
                if name in self.current_zones or not zone_filtered(self, zone.filters):
                    current_zones.append(name)
                    self.entered_zones.add(name)

        # if the zones changed, signal an update
        if not self.false_positive and set(self.current_zones) != set(current_zones):
            significant_update = True

        self.current_zones = current_zones
        return significant_update

    def to_dict(self, include_thumbnail: bool = False):
        return {
            'id': self.obj_data['id'],
            'camera': self.camera,
            'frame_time': self.obj_data['frame_time'],
            'label': self.obj_data['label'],
            'top_score': self.top_score,
            'false_positive': self.false_positive,
            'start_time': self.obj_data['start_time'],
            'end_time': self.obj_data.get('end_time', None),
            'score': self.obj_data['score'],
            'box': self.obj_data['box'],
            'area': self.obj_data['area'],
            'region': self.obj_data['region'],
            'current_zones': self.current_zones.copy(),
            'entered_zones': list(self.entered_zones),
            'thumbnail': base64.b64encode(self.get_thumbnail()).decode('utf-8') if include_thumbnail else None
        }
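
    # A rough sketch of the resulting payload (all field values hypothetical):
    #
    #     {
    #         'id': '1607123955.475377-abc123',
    #         'camera': 'back_yard',
    #         'label': 'person',
    #         'top_score': 0.87,
    #         'false_positive': False,
    #         'current_zones': ['driveway'],
    #         'entered_zones': ['driveway', 'yard'],
    #         ...
    #     }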

    def get_thumbnail(self):
        # return a blank jpg if the thumbnail frame is no longer cached
        if self.thumbnail_data is None or not self.thumbnail_data['frame_time'] in self.frame_cache:
            ret, jpg = cv2.imencode('.jpg', np.zeros((175, 175, 3), np.uint8))
            return jpg.tobytes()

        jpg_bytes = self.get_jpg_bytes(timestamp=False, bounding_box=False, crop=True, height=175)

        if jpg_bytes:
            return jpg_bytes
        else:
            # fall back to a blank jpg if encoding failed
            ret, jpg = cv2.imencode('.jpg', np.zeros((175, 175, 3), np.uint8))
            return jpg.tobytes()

    def get_jpg_bytes(self, timestamp=False, bounding_box=False, crop=False, height=None):
        if self.thumbnail_data is None:
            return None

        try:
            best_frame = cv2.cvtColor(self.frame_cache[self.thumbnail_data['frame_time']], cv2.COLOR_YUV2BGR_I420)
        except KeyError:
            logger.warning(f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache")
            return None

        if bounding_box:
            thickness = 2
            color = COLOR_MAP[self.obj_data['label']]

            # draw the bounding box on the frame
            box = self.thumbnail_data['box']
            draw_box_with_label(
                best_frame, box[0], box[1], box[2], box[3],
                self.obj_data['label'],
                f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}",
                thickness=thickness, color=color
            )

        if crop:
            box = self.thumbnail_data['box']
            region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
            best_frame = best_frame[region[1]:region[3], region[0]:region[2]]

        if height:
            width = int(height * best_frame.shape[1] / best_frame.shape[0])
            best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

        if timestamp:
            time_to_show = datetime.datetime.fromtimestamp(self.thumbnail_data['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
            size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
            text_width = size[0][0]
            desired_size = max(150, 0.33 * best_frame.shape[1])
            font_scale = desired_size / text_width
            cv2.putText(best_frame, time_to_show, (5, best_frame.shape[0] - 7), cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=font_scale, color=(255, 255, 255), thickness=2)

        ret, jpg = cv2.imencode('.jpg', best_frame)
        if ret:
            return jpg.tobytes()
        else:
            return None
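
# Hypothetical use of the jpg helpers above (the tracked object and its
# cached frame are assumed to exist):
#
#     >>> obj = camera_state.tracked_objects['some_id']
#     >>> jpg = obj.get_jpg_bytes(timestamp=True, bounding_box=True, crop=False, height=480)
#     >>> if jpg is not None:
#     ...     with open('/tmp/snapshot.jpg', 'wb') as f:
#     ...         f.write(jpg)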

def zone_filtered(obj: TrackedObject, object_config):
    object_name = obj.obj_data['label']

    if object_name in object_config:
        obj_settings = object_config[object_name]

        # if the min area is larger than the
        # detected object, don't count it in this zone
        if obj_settings.min_area > obj.obj_data['area']:
            return True

        # if the detected object is larger than the
        # max area, don't count it in this zone
        if obj_settings.max_area < obj.obj_data['area']:
            return True

        # if the score is lower than the threshold, skip
        if obj_settings.threshold > obj.computed_score:
            return True

    return False
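
# Illustrative behavior, assuming a zone filter configured with
# min_area=1000, max_area=100000, threshold=0.7: a 'person' with area 500
# is filtered out (too small), one with area 2500 but a computed score of
# 0.65 is filtered out (below threshold), and one with area 2500 and a
# score of 0.75 passes.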

# Maintains the state of a camera
class CameraState:
    def __init__(self, name, config, frame_manager):
        self.name = name
        self.config = config
        self.camera_config = config.cameras[name]
        self.frame_manager = frame_manager

        self.best_objects: Dict[str, TrackedObject] = {}
        self.object_counts = defaultdict(lambda: 0)
        self.tracked_objects: Dict[str, TrackedObject] = {}
        self.frame_cache = {}
        self.zone_objects = defaultdict(lambda: [])
        self._current_frame = np.zeros(self.camera_config.frame_shape_yuv, np.uint8)
        self.current_frame_lock = threading.Lock()
        self.current_frame_time = 0.0
        self.motion_boxes = []
        self.regions = []
        self.previous_frame_id = None
        self.callbacks = defaultdict(lambda: [])

    def get_current_frame(self, draw_options={}):
        with self.current_frame_lock:
            frame_copy = np.copy(self._current_frame)
            frame_time = self.current_frame_time
            tracked_objects = {k: v.to_dict() for k, v in self.tracked_objects.items()}
            motion_boxes = self.motion_boxes.copy()
            regions = self.regions.copy()

        frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)

        # draw on the frame
        if draw_options.get('bounding_boxes'):
            # draw the bounding boxes on the frame
            for obj in tracked_objects.values():
                thickness = 2
                color = COLOR_MAP[obj['label']]

                # use a thin blue box for objects missing from the current frame
                if obj['frame_time'] != frame_time:
                    thickness = 1
                    color = (255, 0, 0)

                box = obj['box']
                draw_box_with_label(
                    frame_copy, box[0], box[1], box[2], box[3],
                    obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}",
                    thickness=thickness, color=color
                )

        if draw_options.get('regions'):
            for region in regions:
                cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0, 255, 0), 2)

        if draw_options.get('zones'):
            for name, zone in self.camera_config.zones.items():
                thickness = 8 if any(name in obj['current_zones'] for obj in tracked_objects.values()) else 2
                cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)

        if draw_options.get('mask'):
            mask_overlay = np.where(self.camera_config.motion.mask == [0])
            frame_copy[mask_overlay] = [0, 0, 0]

        if draw_options.get('motion_boxes'):
            for m_box in motion_boxes:
                cv2.rectangle(frame_copy, (m_box[0], m_box[1]), (m_box[2], m_box[3]), (0, 0, 255), 2)

        if draw_options.get('timestamp'):
            time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
            cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8,
                        color=(255, 255, 255), thickness=2)

        return frame_copy
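
    # Hypothetical draw_options for a debug view (keys match the checks above):
    #
    #     >>> frame = camera_state.get_current_frame({
    #     ...     'bounding_boxes': True,
    #     ...     'zones': True,
    #     ...     'timestamp': True,
    #     ... })
    #     >>> frame.shape   # a BGR image, e.g. (1080, 1920, 3)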

    def finished(self, obj_id):
        del self.tracked_objects[obj_id]

    def on(self, event_type: str, callback: Callable[[Dict], None]):
        self.callbacks[event_type].append(callback)

    def update(self, frame_time, current_detections, motion_boxes, regions):
        self.current_frame_time = frame_time
        self.motion_boxes = motion_boxes
        self.regions = regions

        # get the new frame
        frame_id = f"{self.name}{frame_time}"
        current_frame = self.frame_manager.get(frame_id, self.camera_config.frame_shape_yuv)

        current_ids = current_detections.keys()
        previous_ids = self.tracked_objects.keys()
        removed_ids = list(set(previous_ids).difference(current_ids))
        new_ids = list(set(current_ids).difference(previous_ids))
        updated_ids = list(set(current_ids).intersection(previous_ids))

        for id in new_ids:
            new_obj = self.tracked_objects[id] = TrackedObject(self.name, self.camera_config, self.frame_cache, current_detections[id])

            # call event handlers
            for c in self.callbacks['start']:
                c(self.name, new_obj, frame_time)

        for id in updated_ids:
            updated_obj = self.tracked_objects[id]
            significant_update = updated_obj.update(frame_time, current_detections[id])

            if significant_update:
                # ensure this frame is stored in the cache
                if updated_obj.thumbnail_data['frame_time'] == frame_time and frame_time not in self.frame_cache:
                    self.frame_cache[frame_time] = np.copy(current_frame)

                updated_obj.last_updated = frame_time

            # if it has been more than 5 seconds since the last publish
            # and the last update is more recent than the last publish
            if frame_time - updated_obj.last_published > 5 and updated_obj.last_updated > updated_obj.last_published:
                # call event handlers
                for c in self.callbacks['update']:
                    c(self.name, updated_obj, frame_time)
                updated_obj.last_published = frame_time

        for id in removed_ids:
            # publish events to mqtt
            removed_obj = self.tracked_objects[id]
            if 'end_time' not in removed_obj.obj_data:
                removed_obj.obj_data['end_time'] = frame_time
                for c in self.callbacks['end']:
                    c(self.name, removed_obj, frame_time)

        # TODO: can i switch to looking this up and only changing when an event ends?
        # maintain best objects
        for obj in self.tracked_objects.values():
            object_type = obj.obj_data['label']
            # skip the object if its thumbnail is not from the current frame
            if obj.false_positive or obj.thumbnail_data['frame_time'] != self.current_frame_time:
                continue
            if object_type in self.best_objects:
                current_best = self.best_objects[object_type]
                now = datetime.datetime.now().timestamp()
                # if the new object has a better thumbnail than the current best,
                # or the current best is older than desired, use the new object
                if (is_better_thumbnail(current_best.thumbnail_data, obj.thumbnail_data, self.camera_config.frame_shape)
                        or (now - current_best.thumbnail_data['frame_time']) > self.camera_config.best_image_timeout):
                    self.best_objects[object_type] = obj
                    for c in self.callbacks['snapshot']:
                        c(self.name, self.best_objects[object_type], frame_time)
            else:
                self.best_objects[object_type] = obj
                for c in self.callbacks['snapshot']:
                    c(self.name, self.best_objects[object_type], frame_time)

        # update overall camera state for each object type
        obj_counter = Counter()
        for obj in self.tracked_objects.values():
            if not obj.false_positive:
                obj_counter[obj.obj_data['label']] += 1

        # report on detected objects
        for obj_name, count in obj_counter.items():
            if count != self.object_counts[obj_name]:
                self.object_counts[obj_name] = count
                for c in self.callbacks['object_status']:
                    c(self.name, obj_name, count)

        # expire any objects that are >0 and no longer detected
        expired_objects = [obj_name for obj_name, count in self.object_counts.items() if count > 0 and obj_name not in obj_counter]
        for obj_name in expired_objects:
            self.object_counts[obj_name] = 0
            for c in self.callbacks['object_status']:
                c(self.name, obj_name, 0)
            for c in self.callbacks['snapshot']:
                c(self.name, self.best_objects[obj_name], frame_time)

        # cleanup thumbnail frame cache
        current_thumb_frames = {obj.thumbnail_data['frame_time'] for obj in self.tracked_objects.values() if not obj.false_positive}
        current_best_frames = {obj.thumbnail_data['frame_time'] for obj in self.best_objects.values()}
        thumb_frames_to_delete = [t for t in self.frame_cache.keys() if t not in current_thumb_frames and t not in current_best_frames]
        for t in thumb_frames_to_delete:
            del self.frame_cache[t]

        with self.current_frame_lock:
            self._current_frame = current_frame
            if self.previous_frame_id is not None:
                self.frame_manager.delete(self.previous_frame_id)
            self.previous_frame_id = frame_id
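
# A minimal sketch of wiring a CameraState by hand (the config, frame
# manager, and detection inputs are assumed; TrackedObjectProcessor below
# does exactly this for every configured camera):
#
#     >>> state = CameraState('back_yard', config, frame_manager)
#     >>> state.on('start', lambda camera, obj, t: print(f"{camera}: new {obj.obj_data['label']}"))
#     >>> state.update(frame_time, detections, motion_boxes, regions)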

class TrackedObjectProcessor(threading.Thread):
    def __init__(self, config: FrigateConfig, client, topic_prefix, tracked_objects_queue, event_queue, event_processed_queue, stop_event):
        threading.Thread.__init__(self)
        self.name = "detected_frames_processor"
        self.config = config
        self.client = client
        self.topic_prefix = topic_prefix
        self.tracked_objects_queue = tracked_objects_queue
        self.event_queue = event_queue
        self.event_processed_queue = event_processed_queue
        self.stop_event = stop_event
        self.camera_states: Dict[str, CameraState] = {}
        self.frame_manager = SharedMemoryFrameManager()

        def start(camera, obj: TrackedObject, current_frame_time):
            self.event_queue.put(('start', camera, obj.to_dict()))

        def update(camera, obj: TrackedObject, current_frame_time):
            after = obj.to_dict()
            message = {'before': obj.previous, 'after': after, 'type': 'new' if obj.previous['false_positive'] else 'update'}
            self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
            obj.previous = after

        def end(camera, obj: TrackedObject, current_frame_time):
            snapshot_config = self.config.cameras[camera].snapshots
            event_data = obj.to_dict(include_thumbnail=True)
            event_data['has_snapshot'] = False
            if not obj.false_positive:
                message = {'before': obj.previous, 'after': obj.to_dict(), 'type': 'end'}
                self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
                # write snapshot to disk if enabled
                if snapshot_config.enabled:
                    jpg_bytes = obj.get_jpg_bytes(
                        timestamp=snapshot_config.timestamp,
                        bounding_box=snapshot_config.bounding_box,
                        crop=snapshot_config.crop,
                        height=snapshot_config.height
                    )
                    with open(os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"), 'wb') as j:
                        j.write(jpg_bytes)
                    event_data['has_snapshot'] = True
            self.event_queue.put(('end', camera, event_data))

        def snapshot(camera, obj: TrackedObject, current_frame_time):
            mqtt_config = self.config.cameras[camera].mqtt
            if mqtt_config.enabled:
                jpg_bytes = obj.get_jpg_bytes(
                    timestamp=mqtt_config.timestamp,
                    bounding_box=mqtt_config.bounding_box,
                    crop=mqtt_config.crop,
                    height=mqtt_config.height
                )
                self.client.publish(f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot", jpg_bytes, retain=True)

        def object_status(camera, object_name, status):
            self.client.publish(f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False)

        for camera in self.config.cameras.keys():
            camera_state = CameraState(camera, self.config, self.frame_manager)
            camera_state.on('start', start)
            camera_state.on('update', update)
            camera_state.on('end', end)
            camera_state.on('snapshot', snapshot)
            camera_state.on('object_status', object_status)
            self.camera_states[camera] = camera_state

        # tracks the current count of each label in each zone across cameras:
        # {
        #   'zone_name': {
        #     'person': {
        #       'camera_1': 2,
        #       'camera_2': 1
        #     }
        #   }
        # }
        self.zone_data = defaultdict(lambda: defaultdict(lambda: {}))
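
        # Illustrative: with the structure above,
        # sum(self.zone_data['front']['person'].values()) would be the total
        # number of people currently in the 'front' zone across all cameras.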

    def get_best(self, camera, label):
        # TODO: need a lock here
        camera_state = self.camera_states[camera]
        if label in camera_state.best_objects:
            best_obj = camera_state.best_objects[label]
            best = best_obj.thumbnail_data.copy()
            best['frame'] = camera_state.frame_cache.get(best_obj.thumbnail_data['frame_time'])
            return best
        else:
            return {}
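
    # Hypothetical lookup (returns {} until a 'person' has been tracked):
    #
    #     >>> best = processor.get_best('back_yard', 'person')
    #     >>> best.get('score'), best.get('frame') is not None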

    def get_current_frame(self, camera, draw_options={}):
        return self.camera_states[camera].get_current_frame(draw_options)

    def run(self):
        while True:
            if self.stop_event.is_set():
                logger.info("Exiting object processor...")
                break

            try:
                camera, frame_time, current_tracked_objects, motion_boxes, regions = self.tracked_objects_queue.get(True, 10)
            except queue.Empty:
                continue

            camera_state = self.camera_states[camera]
            camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)

            # update zone counts for each label
            # for each zone in the current camera
            for zone in self.config.cameras[camera].zones.keys():
                # count labels for the camera in the zone
                obj_counter = Counter()
                for obj in camera_state.tracked_objects.values():
                    if zone in obj.current_zones and not obj.false_positive:
                        obj_counter[obj.obj_data['label']] += 1

                # update counts and publish status
                for label in set(list(self.zone_data[zone].keys()) + list(obj_counter.keys())):
                    # if we have previously published a count for this zone/label
                    zone_label = self.zone_data[zone][label]
                    if camera in zone_label:
                        current_count = sum(zone_label.values())
                        zone_label[camera] = obj_counter[label] if label in obj_counter else 0
                        new_count = sum(zone_label.values())
                        if new_count != current_count:
                            self.client.publish(f"{self.topic_prefix}/{zone}/{label}", new_count, retain=False)
                    # if this is a new zone/label combo for this camera
                    else:
                        if label in obj_counter:
                            zone_label[camera] = obj_counter[label]
                            self.client.publish(f"{self.topic_prefix}/{zone}/{label}", obj_counter[label], retain=False)

            # cleanup event finished queue
            while not self.event_processed_queue.empty():
                event_id, camera = self.event_processed_queue.get()
                self.camera_states[camera].finished(event_id)
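
# Worked example of the zone count bookkeeping in run() (hypothetical):
# if zone_data['front']['person'] is {'cam_a': 2, 'cam_b': 1}, the last
# published count was 3; when cam_a next reports 1, the new sum (2) differs
# from the old (3), so 2 is published to '<topic_prefix>/front/person'.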