http.py

import base64
from collections import OrderedDict
from datetime import datetime, timedelta
import json
import glob
import logging
import os
import re
import time
from functools import reduce
from pathlib import Path

import cv2
import numpy as np
from flask import (
    Blueprint,
    Flask,
    Response,
    current_app,
    jsonify,
    make_response,
    request,
)
from peewee import SqliteDatabase, operator, fn, DoesNotExist, Value
from playhouse.shortcuts import model_to_dict

from frigate.const import CLIPS_DIR, RECORD_DIR
from frigate.models import Event, Recordings
from frigate.stats import stats_snapshot
from frigate.util import calculate_region
from frigate.version import VERSION

logger = logging.getLogger(__name__)

bp = Blueprint("frigate", __name__)
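

# App factory: binds the shared SQLite database, stats tracker, and detected
# frames processor to the Flask app, and opens/closes a database connection
# around every request.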
def create_app(
    frigate_config,
    database: SqliteDatabase,
    stats_tracking,
    detected_frames_processor,
):
    app = Flask(__name__)

    @app.before_request
    def _db_connect():
        if database.is_closed():
            database.connect()

    @app.teardown_request
    def _db_close(exc):
        if not database.is_closed():
            database.close()

    app.frigate_config = frigate_config
    app.stats_tracking = stats_tracking
    app.detected_frames_processor = detected_frames_processor

    app.register_blueprint(bp)

    return app


@bp.route("/")
def is_healthy():
    return "Frigate is running. Alive and healthy!"
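

# Daily event counts grouped by camera, label, and zones. Optional
# has_clip/has_snapshot query params (0 or 1) filter the summary,
# e.g. GET /events/summary?has_clip=1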
@bp.route("/events/summary")
def events_summary():
    has_clip = request.args.get("has_clip", type=int)
    has_snapshot = request.args.get("has_snapshot", type=int)

    clauses = []

    if has_clip is not None:
        clauses.append((Event.has_clip == has_clip))

    if has_snapshot is not None:
        clauses.append((Event.has_snapshot == has_snapshot))

    if len(clauses) == 0:
        clauses.append((Value(True)))

    groups = (
        Event.select(
            Event.camera,
            Event.label,
            fn.strftime(
                "%Y-%m-%d", fn.datetime(Event.start_time, "unixepoch", "localtime")
            ).alias("day"),
            Event.zones,
            fn.COUNT(Event.id).alias("count"),
        )
        .where(reduce(operator.and_, clauses))
        .group_by(
            Event.camera,
            Event.label,
            fn.strftime(
                "%Y-%m-%d", fn.datetime(Event.start_time, "unixepoch", "localtime")
            ),
            Event.zones,
        )
    )

    return jsonify([e for e in groups.dicts()])


@bp.route("/events/<id>", methods=("GET",))
def event(id):
    try:
        return model_to_dict(Event.get(Event.id == id))
    except DoesNotExist:
        return "Event not found", 404
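

# Deletes an event row along with its snapshot (.jpg) and clip (.mp4)
# from CLIPS_DIR, if they exist.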
@bp.route("/events/<id>", methods=("DELETE",))
def delete_event(id):
    try:
        event = Event.get(Event.id == id)
    except DoesNotExist:
        return make_response(
            jsonify({"success": False, "message": "Event " + id + " not found"}), 404
        )

    media_name = f"{event.camera}-{event.id}"
    if event.has_snapshot:
        media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
        media.unlink(missing_ok=True)
    if event.has_clip:
        media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
        media.unlink(missing_ok=True)

    event.delete_instance()
    return make_response(
        jsonify({"success": True, "message": "Event " + id + " deleted"}), 200
    )
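

# Returns the stored thumbnail for an event, falling back to the in-progress
# tracked object if the event has not been persisted yet. ?format=android pads
# the image to the 2:1 ratio preferred by Android notifications.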
@bp.route("/events/<id>/thumbnail.jpg")
def event_thumbnail(id):
    format = request.args.get("format", "ios")
    thumbnail_bytes = None
    try:
        event = Event.get(Event.id == id)
        thumbnail_bytes = base64.b64decode(event.thumbnail)
    except DoesNotExist:
        # see if the object is currently being tracked
        try:
            camera_states = current_app.detected_frames_processor.camera_states.values()
            for camera_state in camera_states:
                if id in camera_state.tracked_objects:
                    tracked_obj = camera_state.tracked_objects.get(id)
                    if tracked_obj is not None:
                        thumbnail_bytes = tracked_obj.get_thumbnail()
        except Exception:
            return "Event not found", 404

    if thumbnail_bytes is None:
        return "Event not found", 404

    # android notifications prefer a 2:1 ratio
    if format == "android":
        jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
        img = cv2.imdecode(jpg_as_np, flags=1)
        thumbnail = cv2.copyMakeBorder(
            img,
            0,
            0,
            int(img.shape[1] * 0.5),
            int(img.shape[1] * 0.5),
            cv2.BORDER_CONSTANT,
            value=(0, 0, 0),
        )
        ret, jpg = cv2.imencode(".jpg", thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
        thumbnail_bytes = jpg.tobytes()

    response = make_response(thumbnail_bytes)
    response.headers["Content-Type"] = "image/jpeg"
    return response
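

# Returns the saved snapshot from disk; for in-progress events, renders a jpg
# from the tracked object instead. The timestamp/bbox/crop/h/quality query
# params only apply to the in-progress path.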
@bp.route("/events/<id>/snapshot.jpg")
def event_snapshot(id):
    jpg_bytes = None
    try:
        event = Event.get(Event.id == id)
        if not event.has_snapshot:
            return "Snapshot not available", 404
        # read snapshot from disk
        with open(
            os.path.join(CLIPS_DIR, f"{event.camera}-{id}.jpg"), "rb"
        ) as image_file:
            jpg_bytes = image_file.read()
    except DoesNotExist:
        # see if the object is currently being tracked
        try:
            camera_states = current_app.detected_frames_processor.camera_states.values()
            for camera_state in camera_states:
                if id in camera_state.tracked_objects:
                    tracked_obj = camera_state.tracked_objects.get(id)
                    if tracked_obj is not None:
                        jpg_bytes = tracked_obj.get_jpg_bytes(
                            timestamp=request.args.get("timestamp", type=int),
                            bounding_box=request.args.get("bbox", type=int),
                            crop=request.args.get("crop", type=int),
                            height=request.args.get("h", type=int),
                            quality=request.args.get("quality", default=70, type=int),
                        )
        except Exception:
            return "Event not found", 404
    except Exception:
        return "Event not found", 404

    if jpg_bytes is None:
        return "Event not found", 404

    response = make_response(jpg_bytes)
    response.headers["Content-Type"] = "image/jpeg"
    return response
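

# Event listing with optional filters,
# e.g. GET /events?camera=back_yard&label=person&zone=driveway&limit=50
# Thumbnails are included by default; pass include_thumbnails=0 to slim the
# response.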
@bp.route("/events")
def events():
    limit = request.args.get("limit", default=100, type=int)
    camera = request.args.get("camera")
    label = request.args.get("label")
    zone = request.args.get("zone")
    after = request.args.get("after", type=float)
    before = request.args.get("before", type=float)
    has_clip = request.args.get("has_clip", type=int)
    has_snapshot = request.args.get("has_snapshot", type=int)
    include_thumbnails = request.args.get("include_thumbnails", default=1, type=int)

    clauses = []
    excluded_fields = []

    if camera:
        clauses.append((Event.camera == camera))

    if label:
        clauses.append((Event.label == label))

    if zone:
        clauses.append((Event.zones.cast("text") % f'*"{zone}"*'))

    if after:
        clauses.append((Event.start_time >= after))

    if before:
        clauses.append((Event.start_time <= before))

    if has_clip is not None:
        clauses.append((Event.has_clip == has_clip))

    if has_snapshot is not None:
        clauses.append((Event.has_snapshot == has_snapshot))

    if not include_thumbnails:
        excluded_fields.append(Event.thumbnail)

    if len(clauses) == 0:
        clauses.append((Value(True)))

    events = (
        Event.select()
        .where(reduce(operator.and_, clauses))
        .order_by(Event.start_time.desc())
        .limit(limit)
    )

    return jsonify([model_to_dict(e, exclude=excluded_fields) for e in events])
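

# Returns the resolved config as JSON, with each camera's assembled ffmpeg
# command lines added in for debugging.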
@bp.route("/config")
def config():
    config = current_app.frigate_config.dict()

    # add in the ffmpeg_cmds
    for camera_name, camera in current_app.frigate_config.cameras.items():
        camera_dict = config["cameras"][camera_name]
        camera_dict["ffmpeg_cmds"] = camera.ffmpeg_cmds
        for cmd in camera_dict["ffmpeg_cmds"]:
            cmd["cmd"] = " ".join(cmd["cmd"])

    return jsonify(config)


@bp.route("/config/schema")
def config_schema():
    return current_app.response_class(
        current_app.frigate_config.schema_json(), mimetype="application/json"
    )


@bp.route("/version")
def version():
    return VERSION


@bp.route("/stats")
def stats():
    stats = stats_snapshot(current_app.stats_tracking)
    return jsonify(stats)
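

# Best (highest scoring) recent frame for a label on a camera, optionally
# cropped to the detection box (?crop=1) and resized (?h=300).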
@bp.route("/<camera_name>/<label>/best.jpg")
def best(camera_name, label):
    if camera_name in current_app.frigate_config.cameras:
        best_object = current_app.detected_frames_processor.get_best(camera_name, label)
        best_frame = best_object.get("frame")
        if best_frame is None:
            best_frame = np.zeros((720, 1280, 3), np.uint8)
        else:
            best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)

        crop = bool(request.args.get("crop", 0, type=int))
        if crop:
            box = best_object.get("box", (0, 0, 300, 300))
            region = calculate_region(
                best_frame.shape, box[0], box[1], box[2], box[3], 1.1
            )
            best_frame = best_frame[region[1] : region[3], region[0] : region[2]]

        height = int(request.args.get("h", str(best_frame.shape[0])))
        width = int(height * best_frame.shape[1] / best_frame.shape[0])
        resize_quality = request.args.get("quality", default=70, type=int)

        best_frame = cv2.resize(
            best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA
        )
        ret, jpg = cv2.imencode(
            ".jpg", best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), resize_quality]
        )
        response = make_response(jpg.tobytes())
        response.headers["Content-Type"] = "image/jpeg"
        return response
    else:
        return "Camera named {} not found".format(camera_name), 404
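

# Live MJPEG debug stream. Draw options (bbox, timestamp, zones, mask, motion,
# regions) are passed as 0/1 query params,
# e.g. GET /back_yard?fps=5&h=480&bbox=1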
@bp.route("/<camera_name>")
def mjpeg_feed(camera_name):
    fps = int(request.args.get("fps", "3"))
    height = int(request.args.get("h", "360"))
    draw_options = {
        "bounding_boxes": request.args.get("bbox", type=int),
        "timestamp": request.args.get("timestamp", type=int),
        "zones": request.args.get("zones", type=int),
        "mask": request.args.get("mask", type=int),
        "motion_boxes": request.args.get("motion", type=int),
        "regions": request.args.get("regions", type=int),
    }
    if camera_name in current_app.frigate_config.cameras:
        # return a multipart response
        return Response(
            imagestream(
                current_app.detected_frames_processor,
                camera_name,
                fps,
                height,
                draw_options,
            ),
            mimetype="multipart/x-mixed-replace; boundary=frame",
        )
    else:
        return "Camera named {} not found".format(camera_name), 404
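

# Most recent frame for a camera as a single jpg, with the same draw options
# as the MJPEG feed.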
@bp.route("/<camera_name>/latest.jpg")
def latest_frame(camera_name):
    draw_options = {
        "bounding_boxes": request.args.get("bbox", type=int),
        "timestamp": request.args.get("timestamp", type=int),
        "zones": request.args.get("zones", type=int),
        "mask": request.args.get("mask", type=int),
        "motion_boxes": request.args.get("motion", type=int),
        "regions": request.args.get("regions", type=int),
    }
    resize_quality = request.args.get("quality", default=70, type=int)

    if camera_name in current_app.frigate_config.cameras:
        frame = current_app.detected_frames_processor.get_current_frame(
            camera_name, draw_options
        )
        if frame is None:
            frame = np.zeros((720, 1280, 3), np.uint8)

        height = int(request.args.get("h", str(frame.shape[0])))
        width = int(height * frame.shape[1] / frame.shape[0])

        frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

        ret, jpg = cv2.imencode(
            ".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), resize_quality]
        )
        response = make_response(jpg.tobytes())
        response.headers["Content-Type"] = "image/jpeg"
        return response
    else:
        return "Camera named {} not found".format(camera_name), 404
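

# Recording summary for a camera, bucketed by day and hour. "delay" entries
# mark gaps between consecutive recording segments; events are merged via the
# interval-packing query below so overlapping detections of the same label
# collapse into one row.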
@bp.route("/<camera_name>/recordings")
def recordings(camera_name):
    dates = OrderedDict()

    # Retrieve all recordings for this camera
    recordings = (
        Recordings.select()
        .where(Recordings.camera == camera_name)
        .order_by(Recordings.start_time.asc())
    )

    last_end = 0
    recording: Recordings
    for recording in recordings:
        date = datetime.fromtimestamp(recording.start_time)
        key = date.strftime("%Y-%m-%d")
        hour = date.strftime("%H")

        # Create Day Record
        if key not in dates:
            dates[key] = OrderedDict()

        # Create Hour Record
        if hour not in dates[key]:
            dates[key][hour] = {"delay": {}, "events": []}

        # Check for delay
        the_hour = datetime.strptime(f"{key} {hour}", "%Y-%m-%d %H").timestamp()
        # diff current recording start time and the greater of the previous end time or top of the hour
        diff = recording.start_time - max(last_end, the_hour)
        # Determine seconds into recording
        seconds = 0
        if datetime.fromtimestamp(last_end).strftime("%H") == hour:
            seconds = int(last_end - the_hour)
        # Determine the delay
        delay = min(int(diff), 3600 - seconds)
        if delay > 1:
            # Add an offset for any delay greater than a second
            dates[key][hour]["delay"][seconds] = delay
        last_end = recording.end_time

    # Packing intervals to return all events with same label and overlapping times as one row.
    # See: https://blogs.solidq.com/en/sqlserver/packing-intervals/
    events = Event.raw(
        """WITH C1 AS
        (
        SELECT id, label, camera, top_score, start_time AS ts, +1 AS type, 1 AS sub
        FROM event
        WHERE camera = ?
        UNION ALL
        SELECT id, label, camera, top_score, end_time + 15 AS ts, -1 AS type, 0 AS sub
        FROM event
        WHERE camera = ?
        ),
        C2 AS
        (
        SELECT C1.*,
        SUM(type) OVER(PARTITION BY label ORDER BY ts, type DESC
        ROWS BETWEEN UNBOUNDED PRECEDING
        AND CURRENT ROW) - sub AS cnt
        FROM C1
        ),
        C3 AS
        (
        SELECT id, label, camera, top_score, ts,
        (ROW_NUMBER() OVER(PARTITION BY label ORDER BY ts) - 1) / 2 + 1
        AS grpnum
        FROM C2
        WHERE cnt = 0
        )
        SELECT MIN(id) as id, label, camera, MAX(top_score) as top_score, MIN(ts) AS start_time, max(ts) AS end_time
        FROM C3
        GROUP BY label, grpnum
        ORDER BY start_time;""",
        camera_name,
        camera_name,
    )

    event: Event
    for event in events:
        date = datetime.fromtimestamp(event.start_time)
        key = date.strftime("%Y-%m-%d")
        hour = date.strftime("%H")
        if key in dates and hour in dates[key]:
            dates[key][hour]["events"].append(
                model_to_dict(
                    event,
                    exclude=[
                        Event.false_positive,
                        Event.zones,
                        Event.thumbnail,
                        Event.has_clip,
                        Event.has_snapshot,
                    ],
                )
            )

    return jsonify(
        [
            {
                "date": date,
                "events": sum([len(value["events"]) for value in hours.values()]),
                "recordings": [
                    {"hour": hour, "delay": value["delay"], "events": value["events"]}
                    for hour, value in hours.items()
                ],
            }
            for date, hours in dates.items()
        ]
    )
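

# Builds a playlist of recording segments for the requested hour, trimming the
# first/last clips to the hour boundary. The cache/durations/sequences shape
# appears to match the JSON mapping format consumed by nginx-vod-module
# (an assumption based on the response structure, not stated in this file).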
@bp.route("/vod/<year_month>/<day>/<hour>/<camera>")
def vod(year_month, day, hour, camera):
    start_date = datetime.strptime(f"{year_month}-{day} {hour}", "%Y-%m-%d %H")
    end_date = start_date + timedelta(hours=1) - timedelta(milliseconds=1)
    start_ts = start_date.timestamp()
    end_ts = end_date.timestamp()

    # Select all recordings where either the start or end dates fall in the requested hour
    recordings = (
        Recordings.select()
        .where(
            (Recordings.start_time.between(start_ts, end_ts))
            | (Recordings.end_time.between(start_ts, end_ts))
        )
        .where(Recordings.camera == camera)
        .order_by(Recordings.start_time.asc())
    )

    clips = []
    durations = []

    recording: Recordings
    for recording in recordings:
        clip = {"type": "source", "path": recording.path}
        duration = int(recording.duration * 1000)
        # Determine if offset is needed for first clip
        if recording.start_time < start_ts:
            offset = int((start_ts - recording.start_time) * 1000)
            clip["clipFrom"] = offset
            duration -= offset
        # Determine if we need to end the last clip early
        if recording.end_time > end_ts:
            duration -= int((recording.end_time - end_ts) * 1000)
        clips.append(clip)
        durations.append(duration)

    return jsonify(
        {
            "cache": datetime.now() - timedelta(hours=1) > start_date,
            "discontinuity": False,
            "durations": durations,
            "sequences": [{"clips": clips}],
        }
    )
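

# Generator for the MJPEG feed: sleeps to cap the frame rate, grabs the latest
# frame (or a black placeholder), resizes to the requested height, and yields
# multipart jpeg chunks.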
def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
    while True:
        # max out at specified FPS
        time.sleep(1 / fps)
        frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
        if frame is None:
            frame = np.zeros((height, int(height * 16 / 9), 3), np.uint8)

        width = int(height * frame.shape[1] / frame.shape[0])
        frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)

        ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
        yield (
            b"--frame\r\n"
            b"Content-Type: image/jpeg\r\n\r\n" + jpg.tobytes() + b"\r\n\r\n"
        )