# config.example.yml
  1. web_port: 5000
  2. ################
  3. ## List of detectors.
  4. ## Currently supported types: cpu, edgetpu
  5. ## EdgeTPU requires device as defined here: https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api
  6. ################
  7. detectors:
  8. - type: edgetpu
  9. device: usb
  10. mqtt:
  11. host: mqtt.server.com
  12. topic_prefix: frigate
  13. # client_id: frigate # Optional -- set to override default client id of 'frigate' if running multiple instances
  14. # user: username # Optional
  15. #################
  16. ## Environment variables that begin with 'FRIGATE_' may be referenced in {}.
  17. ## password: '{FRIGATE_MQTT_PASSWORD}'
  18. #################
  19. # password: password # Optional
  20. ################
  21. # Global configuration for saving clips
  22. ################
  23. save_clips:
  24. ###########
  25. # Maximum length of time to retain video during long events.
  26. # If an object is being tracked for longer than this amount of time, the cache
  27. # will begin to expire and the resulting clip will be the last x seconds of the event.
  28. ###########
  29. max_seconds: 300
  30. #################
  31. # Default ffmpeg args. Optional and can be overwritten per camera.
  32. # Should work with most RTSP cameras that send h264 video
  33. # Built from the properties below with:
  34. # "ffmpeg" + global_args + input_args + "-i" + input + output_args
  35. #################
  36. # ffmpeg:
  37. # global_args:
  38. # - -hide_banner
  39. # - -loglevel
  40. # - panic
  41. # hwaccel_args: []
  42. # input_args:
  43. # - -avoid_negative_ts
  44. # - make_zero
  45. # - -fflags
  46. # - nobuffer
  47. # - -flags
  48. # - low_delay
  49. # - -strict
  50. # - experimental
  51. # - -fflags
  52. # - +genpts+discardcorrupt
  53. # - -vsync
  54. # - drop
  55. # - -rtsp_transport
  56. # - tcp
  57. # - -stimeout
  58. # - '5000000'
  59. # - -use_wallclock_as_timestamps
  60. # - '1'
  61. # output_args:
  62. # - -f
  63. # - rawvideo
  64. # - -pix_fmt
  65. # - rgb24
  66. ####################
  67. # Global object configuration. Applies to all cameras
  68. # unless overridden at the camera levels.
  69. # Keys must be valid labels. By default, the model uses coco (https://dl.google.com/coral/canned_models/coco_labels.txt).
  70. # All labels from the model are reported over MQTT. These values are used to filter out false positives.
  71. # min_area (optional): minimum width*height of the bounding box for the detected object
  72. # max_area (optional): maximum width*height of the bounding box for the detected object
  73. # min_score (optional): minimum score for the object to initiate tracking
  74. # threshold (optional): The minimum decimal percentage for tracked object's computed score to considered a true positive
  75. ####################
  76. objects:
  77. track:
  78. - person
  79. filters:
  80. person:
  81. min_area: 5000
  82. max_area: 100000
  83. min_score: 0.5
  84. threshold: 0.85
  85. cameras:
  86. back:
  87. ffmpeg:
  88. ################
  89. # Source passed to ffmpeg after the -i parameter. Supports anything compatible with OpenCV and FFmpeg.
  90. # Environment variables that begin with 'FRIGATE_' may be referenced in {}
  91. ################
  92. input: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
  93. #################
  94. # These values will override default values for just this camera
  95. #################
  96. # global_args: []
  97. # hwaccel_args: []
  98. # input_args: []
  99. # output_args: []
  100. ################
  101. ## Optionally specify the resolution of the video feed. Frigate will try to auto detect if not specified
  102. ################
  103. # height: 1280
  104. # width: 720
  105. ################
  106. ## Specify the framerate of your camera
  107. ##
  108. ## NOTE: This should only be set in the event ffmpeg is unable to determine your camera's framerate
  109. ## on its own and the reported framerate for your camera in frigate is well over what is expected.
  110. ################
  111. # fps: 5
  112. ################
  113. ## Optional mask. Must be the same aspect ratio as your video feed. Value is any of the following:
  114. ## - name of a file in the config directory
  115. ## - base64 encoded image prefixed with 'base64,' eg. 'base64,asfasdfasdf....'
  116. ## - polygon of x,y coordinates prefixed with 'poly,' eg. 'poly,0,900,1080,900,1080,1920,0,1920'
  117. ##
  118. ## The mask works by looking at the bottom center of the bounding box for the detected
  119. ## person in the image. If that pixel in the mask is a black pixel, it ignores it as a
  120. ## false positive. In my mask, the grass and driveway visible from my backdoor camera
  121. ## are white. The garage doors, sky, and trees (anywhere it would be impossible for a
  122. ## person to stand) are black.
  123. ##
  124. ## Masked areas are also ignored for motion detection.
  125. ################
  126. # mask: back-mask.bmp
  127. ################
  128. # Allows you to limit the framerate within frigate for cameras that do not support
  129. # custom framerates. A value of 1 tells frigate to look at every frame, 2 every 2nd frame,
  130. # 3 every 3rd frame, etc.
  131. ################
  132. take_frame: 1
  133. ################
  134. # The number of seconds to retain the highest scoring image for the best.jpg endpoint before allowing it
  135. # to be replaced by a newer image. Defaults to 60 seconds.
  136. ################
  137. best_image_timeout: 60
  138. ################
  139. # MQTT settings
  140. ################
  141. # mqtt:
  142. # crop_to_region: True
  143. # snapshot_height: 300
  144. ################
  145. # Zones
  146. ################
  147. zones:
  148. #################
  149. # Name of the zone
  150. ################
  151. front_steps:
  152. ####################
  153. # A list of x,y coordinates to define the polygon of the zone. The top
  154. # left corner is 0,0. Can also be a comma separated string of all x,y coordinates combined.
  155. # The same zone name can exist across multiple cameras if they have overlapping FOVs.
  156. # An object is determined to be in the zone based on whether or not the bottom center
  157. # of it's bounding box is within the polygon. The polygon must have at least 3 points.
  158. # Coordinates can be generated at https://www.image-map.net/
  159. ####################
  160. coordinates:
  161. - 545,1077
  162. - 747,939
  163. - 788,805
  164. ################
  165. # Zone level object filters. These are applied in addition to the global and camera filters
  166. # and should be more restrictive than the global and camera filters. The global and camera
  167. # filters are applied upstream.
  168. ################
  169. filters:
  170. person:
  171. min_area: 5000
  172. max_area: 100000
  173. threshold: 0.8
  174. ################
  175. # This will save a clip for each tracked object by frigate along with a json file that contains
  176. # data related to the tracked object. This works by telling ffmpeg to write video segments to /cache
  177. # from the video stream without re-encoding. Clips are then created by using ffmpeg to merge segments
  178. # without re-encoding. The segments saved are unaltered from what frigate receives to avoid re-encoding.
  179. # They do not contain bounding boxes. These are optimized to capture "false_positive" examples for improving frigate.
  180. #
  181. # NOTE: This feature does not work if you have "-vsync drop" configured in your input params.
  182. # This will only work for camera feeds that can be copied into the mp4 container format without
  183. # encoding such as h264. It may not work for some types of streams.
  184. ################
  185. save_clips:
  186. enabled: False
  187. #########
  188. # Number of seconds before the event to include in the clips
  189. #########
  190. pre_capture: 30
  191. #########
  192. # Objects to save clips for. Defaults to all tracked object types.
  193. #########
  194. # objects:
  195. # - person
  196. ################
  197. # Configuration for the snapshots in the debug view and mqtt
  198. ################
  199. snapshots:
  200. show_timestamp: True
  201. draw_zones: False
  202. ################
  203. # Camera level object config. If defined, this is used instead of the global config.
  204. ################
  205. objects:
  206. track:
  207. - person
  208. - car
  209. filters:
  210. person:
  211. min_area: 5000
  212. max_area: 100000
  213. min_score: 0.5
  214. threshold: 0.85