# http.py — Frigate HTTP API (Flask blueprint)
  1. import base64
  2. from collections import OrderedDict
  3. from datetime import datetime, timedelta
  4. import copy
  5. import json
  6. import glob
  7. import logging
  8. import os
  9. import re
  10. import subprocess as sp
  11. import time
  12. from functools import reduce
  13. from pathlib import Path
  14. import cv2
  15. from flask.helpers import send_file
  16. import numpy as np
  17. from flask import (
  18. Blueprint,
  19. Flask,
  20. Response,
  21. current_app,
  22. jsonify,
  23. make_response,
  24. request,
  25. )
  26. from peewee import SqliteDatabase, operator, fn, DoesNotExist, Value
  27. from playhouse.shortcuts import model_to_dict
  28. from frigate.const import CLIPS_DIR, RECORD_DIR
  29. from frigate.models import Event, Recordings
  30. from frigate.stats import stats_snapshot
  31. from frigate.util import calculate_region
  32. from frigate.version import VERSION
# Module-level logger and the Flask blueprint that every HTTP route below
# attaches to; create_app() registers the blueprint on the app instance.
logger = logging.getLogger(__name__)
bp = Blueprint("frigate", __name__)
def create_app(
    frigate_config,
    database: SqliteDatabase,
    stats_tracking,
    detected_frames_processor,
):
    """Build the Flask app, wire per-request DB connection handling, and
    expose shared state on the app object for route handlers.

    Args:
        frigate_config: parsed Frigate configuration object.
        database: peewee SqliteDatabase backing the Event/Recordings models.
        stats_tracking: shared stats state read by the /stats endpoint.
        detected_frames_processor: source of current frames / tracked objects.

    Returns:
        The configured Flask application.
    """
    app = Flask(__name__)

    @app.before_request
    def _db_connect():
        # Open a DB connection lazily at the start of each request.
        if database.is_closed():
            database.connect()

    @app.teardown_request
    def _db_close(exc):
        # Always release the connection, even when the request raised.
        if not database.is_closed():
            database.close()

    # Route handlers reach these through current_app.
    app.frigate_config = frigate_config
    app.stats_tracking = stats_tracking
    app.detected_frames_processor = detected_frames_processor

    app.register_blueprint(bp)

    return app
  55. @bp.route("/")
  56. def is_healthy():
  57. return "Frigate is running. Alive and healthy!"
  58. @bp.route("/events/summary")
  59. def events_summary():
  60. has_clip = request.args.get("has_clip", type=int)
  61. has_snapshot = request.args.get("has_snapshot", type=int)
  62. clauses = []
  63. if not has_clip is None:
  64. clauses.append((Event.has_clip == has_clip))
  65. if not has_snapshot is None:
  66. clauses.append((Event.has_snapshot == has_snapshot))
  67. if len(clauses) == 0:
  68. clauses.append((True))
  69. groups = (
  70. Event.select(
  71. Event.camera,
  72. Event.label,
  73. fn.strftime(
  74. "%Y-%m-%d", fn.datetime(Event.start_time, "unixepoch", "localtime")
  75. ).alias("day"),
  76. Event.zones,
  77. fn.COUNT(Event.id).alias("count"),
  78. )
  79. .where(reduce(operator.and_, clauses))
  80. .group_by(
  81. Event.camera,
  82. Event.label,
  83. fn.strftime(
  84. "%Y-%m-%d", fn.datetime(Event.start_time, "unixepoch", "localtime")
  85. ),
  86. Event.zones,
  87. )
  88. )
  89. return jsonify([e for e in groups.dicts()])
  90. @bp.route("/events/<id>", methods=("GET",))
  91. def event(id):
  92. try:
  93. return model_to_dict(Event.get(Event.id == id))
  94. except DoesNotExist:
  95. return "Event not found", 404
  96. @bp.route("/events/<id>", methods=("DELETE",))
  97. def delete_event(id):
  98. try:
  99. event = Event.get(Event.id == id)
  100. except DoesNotExist:
  101. return make_response(
  102. jsonify({"success": False, "message": "Event" + id + " not found"}), 404
  103. )
  104. media_name = f"{event.camera}-{event.id}"
  105. if event.has_snapshot:
  106. media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
  107. media.unlink(missing_ok=True)
  108. if event.has_clip:
  109. media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
  110. media.unlink(missing_ok=True)
  111. event.delete_instance()
  112. return make_response(
  113. jsonify({"success": True, "message": "Event" + id + " deleted"}), 200
  114. )
  115. @bp.route("/events/<id>/thumbnail.jpg")
  116. def event_thumbnail(id):
  117. format = request.args.get("format", "ios")
  118. thumbnail_bytes = None
  119. try:
  120. event = Event.get(Event.id == id)
  121. thumbnail_bytes = base64.b64decode(event.thumbnail)
  122. except DoesNotExist:
  123. # see if the object is currently being tracked
  124. try:
  125. camera_states = current_app.detected_frames_processor.camera_states.values()
  126. for camera_state in camera_states:
  127. if id in camera_state.tracked_objects:
  128. tracked_obj = camera_state.tracked_objects.get(id)
  129. if not tracked_obj is None:
  130. thumbnail_bytes = tracked_obj.get_thumbnail()
  131. except:
  132. return "Event not found", 404
  133. if thumbnail_bytes is None:
  134. return "Event not found", 404
  135. # android notifications prefer a 2:1 ratio
  136. if format == "android":
  137. jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
  138. img = cv2.imdecode(jpg_as_np, flags=1)
  139. thumbnail = cv2.copyMakeBorder(
  140. img,
  141. 0,
  142. 0,
  143. int(img.shape[1] * 0.5),
  144. int(img.shape[1] * 0.5),
  145. cv2.BORDER_CONSTANT,
  146. (0, 0, 0),
  147. )
  148. ret, jpg = cv2.imencode(".jpg", thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
  149. thumbnail_bytes = jpg.tobytes()
  150. response = make_response(thumbnail_bytes)
  151. response.headers["Content-Type"] = "image/jpeg"
  152. return response
  153. @bp.route("/events/<id>/snapshot.jpg")
  154. def event_snapshot(id):
  155. download = request.args.get("download", type=bool)
  156. jpg_bytes = None
  157. try:
  158. event = Event.get(Event.id == id, Event.end_time != None)
  159. if not event.has_snapshot:
  160. return "Snapshot not available", 404
  161. # read snapshot from disk
  162. with open(
  163. os.path.join(CLIPS_DIR, f"{event.camera}-{id}.jpg"), "rb"
  164. ) as image_file:
  165. jpg_bytes = image_file.read()
  166. except DoesNotExist:
  167. # see if the object is currently being tracked
  168. try:
  169. camera_states = current_app.detected_frames_processor.camera_states.values()
  170. for camera_state in camera_states:
  171. if id in camera_state.tracked_objects:
  172. tracked_obj = camera_state.tracked_objects.get(id)
  173. if not tracked_obj is None:
  174. jpg_bytes = tracked_obj.get_jpg_bytes(
  175. timestamp=request.args.get("timestamp", type=int),
  176. bounding_box=request.args.get("bbox", type=int),
  177. crop=request.args.get("crop", type=int),
  178. height=request.args.get("h", type=int),
  179. quality=request.args.get("quality", default=70, type=int),
  180. )
  181. except:
  182. return "Event not found", 404
  183. except:
  184. return "Event not found", 404
  185. if jpg_bytes is None:
  186. return "Event not found", 404
  187. response = make_response(jpg_bytes)
  188. response.headers["Content-Type"] = "image/jpeg"
  189. if download:
  190. response.headers[
  191. "Content-Disposition"
  192. ] = f"attachment; filename=snapshot-{id}.jpg"
  193. return response
  194. @bp.route("/events/<id>/clip.mp4")
  195. def event_clip(id):
  196. download = request.args.get("download", type=bool)
  197. try:
  198. event: Event = Event.get(Event.id == id)
  199. except DoesNotExist:
  200. return "Event not found.", 404
  201. if not event.has_clip:
  202. return "Clip not available", 404
  203. file_name = f"{event.camera}-{id}.mp4"
  204. clip_path = os.path.join(CLIPS_DIR, file_name)
  205. if not os.path.isfile(clip_path):
  206. return recording_clip(event.camera, event.start_time, event.end_time)
  207. response = make_response()
  208. response.headers["Content-Description"] = "File Transfer"
  209. response.headers["Cache-Control"] = "no-cache"
  210. response.headers["Content-Type"] = "video/mp4"
  211. if download:
  212. response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name
  213. response.headers["Content-Length"] = os.path.getsize(clip_path)
  214. response.headers[
  215. "X-Accel-Redirect"
  216. ] = f"/clips/{file_name}" # nginx: http://wiki.nginx.org/NginxXSendfile
  217. return response
  218. @bp.route("/events")
  219. def events():
  220. limit = request.args.get("limit", 100)
  221. camera = request.args.get("camera")
  222. label = request.args.get("label")
  223. zone = request.args.get("zone")
  224. after = request.args.get("after", type=float)
  225. before = request.args.get("before", type=float)
  226. has_clip = request.args.get("has_clip", type=int)
  227. has_snapshot = request.args.get("has_snapshot", type=int)
  228. include_thumbnails = request.args.get("include_thumbnails", default=1, type=int)
  229. clauses = []
  230. excluded_fields = []
  231. if camera:
  232. clauses.append((Event.camera == camera))
  233. if label:
  234. clauses.append((Event.label == label))
  235. if zone:
  236. clauses.append((Event.zones.cast("text") % f'*"{zone}"*'))
  237. if after:
  238. clauses.append((Event.start_time >= after))
  239. if before:
  240. clauses.append((Event.start_time <= before))
  241. if not has_clip is None:
  242. clauses.append((Event.has_clip == has_clip))
  243. if not has_snapshot is None:
  244. clauses.append((Event.has_snapshot == has_snapshot))
  245. if not include_thumbnails:
  246. excluded_fields.append(Event.thumbnail)
  247. if len(clauses) == 0:
  248. clauses.append((True))
  249. events = (
  250. Event.select()
  251. .where(reduce(operator.and_, clauses))
  252. .order_by(Event.start_time.desc())
  253. .limit(limit)
  254. )
  255. return jsonify([model_to_dict(e, exclude=excluded_fields) for e in events])
  256. @bp.route("/config")
  257. def config():
  258. config = current_app.frigate_config.dict()
  259. # add in the ffmpeg_cmds
  260. for camera_name, camera in current_app.frigate_config.cameras.items():
  261. camera_dict = config["cameras"][camera_name]
  262. camera_dict["ffmpeg_cmds"] = copy.deepcopy(camera.ffmpeg_cmds)
  263. for cmd in camera_dict["ffmpeg_cmds"]:
  264. cmd["cmd"] = " ".join(cmd["cmd"])
  265. return jsonify(config)
  266. @bp.route("/config/schema")
  267. def config_schema():
  268. return current_app.response_class(
  269. current_app.frigate_config.schema_json(), mimetype="application/json"
  270. )
@bp.route("/version")
def version():
    """Return the running Frigate version string."""
    return VERSION
  274. @bp.route("/stats")
  275. def stats():
  276. stats = stats_snapshot(current_app.stats_tracking)
  277. return jsonify(stats)
  278. @bp.route("/<camera_name>/<label>/best.jpg")
  279. def best(camera_name, label):
  280. if camera_name in current_app.frigate_config.cameras:
  281. best_object = current_app.detected_frames_processor.get_best(camera_name, label)
  282. best_frame = best_object.get("frame")
  283. if best_frame is None:
  284. best_frame = np.zeros((720, 1280, 3), np.uint8)
  285. else:
  286. best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)
  287. crop = bool(request.args.get("crop", 0, type=int))
  288. if crop:
  289. box_size = 300
  290. box = best_object.get("box", (0, 0, box_size, box_size))
  291. region = calculate_region(
  292. best_frame.shape, box[0], box[1], box[2], box[3], box_size, multiplier=1.1
  293. )
  294. best_frame = best_frame[region[1] : region[3], region[0] : region[2]]
  295. height = int(request.args.get("h", str(best_frame.shape[0])))
  296. width = int(height * best_frame.shape[1] / best_frame.shape[0])
  297. resize_quality = request.args.get("quality", default=70, type=int)
  298. best_frame = cv2.resize(
  299. best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA
  300. )
  301. ret, jpg = cv2.imencode(
  302. ".jpg", best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), resize_quality]
  303. )
  304. response = make_response(jpg.tobytes())
  305. response.headers["Content-Type"] = "image/jpeg"
  306. return response
  307. else:
  308. return "Camera named {} not found".format(camera_name), 404
  309. @bp.route("/<camera_name>")
  310. def mjpeg_feed(camera_name):
  311. fps = int(request.args.get("fps", "3"))
  312. height = int(request.args.get("h", "360"))
  313. draw_options = {
  314. "bounding_boxes": request.args.get("bbox", type=int),
  315. "timestamp": request.args.get("timestamp", type=int),
  316. "zones": request.args.get("zones", type=int),
  317. "mask": request.args.get("mask", type=int),
  318. "motion_boxes": request.args.get("motion", type=int),
  319. "regions": request.args.get("regions", type=int),
  320. }
  321. if camera_name in current_app.frigate_config.cameras:
  322. # return a multipart response
  323. return Response(
  324. imagestream(
  325. current_app.detected_frames_processor,
  326. camera_name,
  327. fps,
  328. height,
  329. draw_options,
  330. ),
  331. mimetype="multipart/x-mixed-replace; boundary=frame",
  332. )
  333. else:
  334. return "Camera named {} not found".format(camera_name), 404
  335. @bp.route("/<camera_name>/latest.jpg")
  336. def latest_frame(camera_name):
  337. draw_options = {
  338. "bounding_boxes": request.args.get("bbox", type=int),
  339. "timestamp": request.args.get("timestamp", type=int),
  340. "zones": request.args.get("zones", type=int),
  341. "mask": request.args.get("mask", type=int),
  342. "motion_boxes": request.args.get("motion", type=int),
  343. "regions": request.args.get("regions", type=int),
  344. }
  345. resize_quality = request.args.get("quality", default=70, type=int)
  346. if camera_name in current_app.frigate_config.cameras:
  347. frame = current_app.detected_frames_processor.get_current_frame(
  348. camera_name, draw_options
  349. )
  350. if frame is None:
  351. frame = np.zeros((720, 1280, 3), np.uint8)
  352. height = int(request.args.get("h", str(frame.shape[0])))
  353. width = int(height * frame.shape[1] / frame.shape[0])
  354. frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
  355. ret, jpg = cv2.imencode(
  356. ".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), resize_quality]
  357. )
  358. response = make_response(jpg.tobytes())
  359. response.headers["Content-Type"] = "image/jpeg"
  360. return response
  361. else:
  362. return "Camera named {} not found".format(camera_name), 404
@bp.route("/<camera_name>/recordings")
def recordings(camera_name):
    """Summarize a camera's recordings grouped by day and hour.

    Returns a JSON list of ``{date, events, recordings}`` objects where each
    hour entry carries a ``delay`` map (seconds-into-hour -> gap length in
    seconds) describing recording gaps, and the packed event intervals that
    fall inside recorded hours.
    """
    dates = OrderedDict()

    # Retrieve all recordings for this camera
    recordings = (
        Recordings.select()
        .where(Recordings.camera == camera_name)
        .order_by(Recordings.start_time.asc())
    )

    last_end = 0
    recording: Recordings
    for recording in recordings:
        date = datetime.fromtimestamp(recording.start_time)
        key = date.strftime("%Y-%m-%d")
        hour = date.strftime("%H")

        # Create Day Record
        if key not in dates:
            dates[key] = OrderedDict()

        # Create Hour Record
        if hour not in dates[key]:
            dates[key][hour] = {"delay": {}, "events": []}

        # Check for delay: the_hour is the unix timestamp of the top of this
        # recording's hour (local time).
        the_hour = datetime.strptime(f"{key} {hour}", "%Y-%m-%d %H").timestamp()
        # diff current recording start time and the greater of the previous
        # end time or top of the hour
        diff = recording.start_time - max(last_end, the_hour)
        # Determine seconds into recording (only when the previous recording
        # ended within this same hour).
        seconds = 0
        if datetime.fromtimestamp(last_end).strftime("%H") == hour:
            seconds = int(last_end - the_hour)
        # Determine the delay, capped so it never extends past the hour.
        delay = min(int(diff), 3600 - seconds)
        if delay > 1:
            # Add an offset for any delay greater than a second
            dates[key][hour]["delay"][seconds] = delay
        last_end = recording.end_time

    # Packing intervals to return all events with same label and overlapping
    # times as one row.
    # See: https://blogs.solidq.com/en/sqlserver/packing-intervals/
    # (end_time is padded by 15s so near-adjacent events merge.)
    events = Event.raw(
        """WITH C1 AS
        (
        SELECT id, label, camera, top_score, start_time AS ts, +1 AS type, 1 AS sub
        FROM event
        WHERE camera = ?
        UNION ALL
        SELECT id, label, camera, top_score, end_time + 15 AS ts, -1 AS type, 0 AS sub
        FROM event
        WHERE camera = ?
        ),
        C2 AS
        (
        SELECT C1.*,
        SUM(type) OVER(PARTITION BY label ORDER BY ts, type DESC
        ROWS BETWEEN UNBOUNDED PRECEDING
        AND CURRENT ROW) - sub AS cnt
        FROM C1
        ),
        C3 AS
        (
        SELECT id, label, camera, top_score, ts,
        (ROW_NUMBER() OVER(PARTITION BY label ORDER BY ts) - 1) / 2 + 1
        AS grpnum
        FROM C2
        WHERE cnt = 0
        )
        SELECT MIN(id) as id, label, camera, MAX(top_score) as top_score, MIN(ts) AS start_time, max(ts) AS end_time
        FROM C3
        GROUP BY label, grpnum
        ORDER BY start_time;""",
        camera_name,
        camera_name,
    )

    # Attach each packed event to the day/hour bucket it started in; events
    # outside any recorded hour are dropped.
    event: Event
    for event in events:
        date = datetime.fromtimestamp(event.start_time)
        key = date.strftime("%Y-%m-%d")
        hour = date.strftime("%H")
        if key in dates and hour in dates[key]:
            dates[key][hour]["events"].append(
                model_to_dict(
                    event,
                    exclude=[
                        Event.false_positive,
                        Event.zones,
                        Event.thumbnail,
                        Event.has_clip,
                        Event.has_snapshot,
                    ],
                )
            )

    return jsonify(
        [
            {
                "date": date,
                "events": sum([len(value["events"]) for value in hours.values()]),
                "recordings": [
                    {"hour": hour, "delay": value["delay"], "events": value["events"]}
                    for hour, value in hours.items()
                ],
            }
            for date, hours in dates.items()
        ]
    )
@bp.route("/<camera>/start/<int:start_ts>/end/<int:end_ts>/clip.mp4")
@bp.route("/<camera>/start/<float:start_ts>/end/<float:end_ts>/clip.mp4")
def recording_clip(camera, start_ts, end_ts):
    """Concatenate recording segments overlapping [start_ts, end_ts] into a
    single mp4 using ffmpeg's concat demuxer, then serve it via nginx.

    Args:
        camera: camera name.
        start_ts / end_ts: unix timestamps bounding the clip.
    """
    download = request.args.get("download", type=bool)

    # All recording segments that overlap the requested window.
    recordings = (
        Recordings.select()
        .where(
            (Recordings.start_time.between(start_ts, end_ts))
            | (Recordings.end_time.between(start_ts, end_ts))
            | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
        )
        .where(Recordings.camera == camera)
        .order_by(Recordings.start_time.asc())
    )

    # Build the concat playlist, trimming the first/last segment with
    # inpoint/outpoint so the output starts/ends at the requested times.
    playlist_lines = []
    clip: Recordings
    for clip in recordings:
        playlist_lines.append(f"file '{clip.path}'")
        # if this is the starting clip, add an inpoint
        if clip.start_time < start_ts:
            playlist_lines.append(f"inpoint {int(start_ts - clip.start_time)}")
        # if this is the ending clip, add an outpoint
        if clip.end_time > end_ts:
            playlist_lines.append(f"outpoint {int(end_ts - clip.start_time)}")

    file_name = f"clip_{camera}_{start_ts}-{end_ts}.mp4"
    path = f"/tmp/cache/{file_name}"

    # Stream-copy (no re-encode); +faststart moves the moov atom to the
    # front so playback can begin before the download completes.
    ffmpeg_cmd = [
        "ffmpeg",
        "-y",
        "-protocol_whitelist",
        "pipe,file",
        "-f",
        "concat",
        "-safe",
        "0",
        "-i",
        "-",
        "-c",
        "copy",
        "-movflags",
        "+faststart",
        path,
    ]

    # The playlist is fed to ffmpeg over stdin.
    p = sp.run(
        ffmpeg_cmd,
        input="\n".join(playlist_lines),
        encoding="ascii",
        capture_output=True,
    )

    if p.returncode != 0:
        logger.error(p.stderr)
        return f"Could not create clip from recordings for {camera}.", 500

    response = make_response()
    response.headers["Content-Description"] = "File Transfer"
    response.headers["Cache-Control"] = "no-cache"
    response.headers["Content-Type"] = "video/mp4"
    if download:
        response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name
    response.headers["Content-Length"] = os.path.getsize(path)
    response.headers[
        "X-Accel-Redirect"
    ] = f"/cache/{file_name}"  # nginx: http://wiki.nginx.org/NginxXSendfile

    return response
  528. @bp.route("/vod/<camera>/start/<int:start_ts>/end/<int:end_ts>")
  529. @bp.route("/vod/<camera>/start/<float:start_ts>/end/<float:end_ts>")
  530. def vod_ts(camera, start_ts, end_ts):
  531. recordings = (
  532. Recordings.select()
  533. .where(
  534. Recordings.start_time.between(start_ts, end_ts)
  535. | Recordings.end_time.between(start_ts, end_ts)
  536. | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
  537. )
  538. .where(Recordings.camera == camera)
  539. .order_by(Recordings.start_time.asc())
  540. )
  541. clips = []
  542. durations = []
  543. recording: Recordings
  544. for recording in recordings:
  545. clip = {"type": "source", "path": recording.path}
  546. duration = int(recording.duration * 1000)
  547. # Determine if offset is needed for first clip
  548. if recording.start_time < start_ts:
  549. offset = int((start_ts - recording.start_time) * 1000)
  550. clip["clipFrom"] = offset
  551. duration -= offset
  552. # Determine if we need to end the last clip early
  553. if recording.end_time > end_ts:
  554. duration -= int((recording.end_time - end_ts) * 1000)
  555. if duration > 0:
  556. clips.append(clip)
  557. durations.append(duration)
  558. else:
  559. logger.warning(f"Recording clip is missing or empty: {recording.path}")
  560. if not clips:
  561. logger.error("No recordings found for the requested time range")
  562. return "No recordings found.", 404
  563. hour_ago = datetime.now() - timedelta(hours=1)
  564. return jsonify(
  565. {
  566. "cache": hour_ago.timestamp() > start_ts,
  567. "discontinuity": False,
  568. "durations": durations,
  569. "sequences": [{"clips": clips}],
  570. }
  571. )
  572. @bp.route("/vod/<year_month>/<day>/<hour>/<camera>")
  573. def vod_hour(year_month, day, hour, camera):
  574. start_date = datetime.strptime(f"{year_month}-{day} {hour}", "%Y-%m-%d %H")
  575. end_date = start_date + timedelta(hours=1) - timedelta(milliseconds=1)
  576. start_ts = start_date.timestamp()
  577. end_ts = end_date.timestamp()
  578. return vod_ts(camera, start_ts, end_ts)
@bp.route("/vod/event/<id>")
def vod_event(id):
    """Build a VOD manifest for a single event's clip.

    Falls back to stitching directly from recordings (vod_ts) when the saved
    clip file is missing from disk.
    """
    try:
        event: Event = Event.get(Event.id == id)
    except DoesNotExist:
        logger.error(f"Event not found: {id}")
        return "Event not found.", 404

    if not event.has_clip:
        logger.error(f"Event does not have recordings: {id}")
        return "Recordings not available", 404

    clip_path = os.path.join(CLIPS_DIR, f"{event.camera}-{id}.mp4")

    if not os.path.isfile(clip_path):
        # In-progress events have no end_time yet; use "now" as the bound.
        end_ts = (
            datetime.now().timestamp() if event.end_time is None else event.end_time
        )
        return vod_ts(event.camera, event.start_time, end_ts)

    # NOTE(review): this assumes end_time is set whenever the clip file
    # exists on disk — an in-progress event with a file present would raise
    # a TypeError here. Confirm against the clip-writing path.
    duration = int((event.end_time - event.start_time) * 1000)
    return jsonify(
        {
            "cache": True,
            "discontinuity": False,
            "durations": [duration],
            "sequences": [{"clips": [{"type": "source", "path": clip_path}]}],
        }
    )
  604. def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
  605. while True:
  606. # max out at specified FPS
  607. time.sleep(1 / fps)
  608. frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
  609. if frame is None:
  610. frame = np.zeros((height, int(height * 16 / 9), 3), np.uint8)
  611. width = int(height * frame.shape[1] / frame.shape[0])
  612. frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
  613. ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
  614. yield (
  615. b"--frame\r\n"
  616. b"Content-Type: image/jpeg\r\n\r\n" + jpg.tobytes() + b"\r\n\r\n"
  617. )