  1. """
  2. The AudioToTextRecorder class in the provided code facilitates
  3. fast speech-to-text transcription.
  4. The class employs the faster_whisper library to transcribe the recorded audio
  5. into text using machine learning models, which can be run either on a GPU or
  6. CPU. Voice activity detection (VAD) is built in, meaning the software can
  7. automatically start or stop recording based on the presence or absence of
  8. speech. It integrates wake word detection through the pvporcupine library,
  9. allowing the software to initiate recording when a specific word or phrase
  10. is spoken. The system provides real-time feedback and can be further
  11. customized.
  12. Features:
  13. - Voice Activity Detection: Automatically starts/stops recording when speech
  14. is detected or when speech ends.
  15. - Wake Word Detection: Starts recording when a specified wake word (or words)
  16. is detected.
  17. - Event Callbacks: Customizable callbacks for when recording starts
  18. or finishes.
  19. - Fast Transcription: Returns the transcribed text from the audio as fast
  20. as possible.
  21. Author: Kolja Beigel
  22. """
from typing import Iterable, List, Optional, Union
import torch.multiprocessing as mp
import torch
from ctypes import c_bool
from openwakeword.model import Model
from scipy.signal import resample
from scipy import signal
import faster_whisper
import openwakeword
import collections
import numpy as np
import pvporcupine
import traceback
import threading
import webrtcvad
import itertools
import platform
import pyaudio
import logging
import struct
import halo
import time
import copy
import os
import re
import gc
# Set OpenMP runtime duplicate library handling to OK (Use only for development!)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

INIT_MODEL_TRANSCRIPTION = "tiny"
INIT_MODEL_TRANSCRIPTION_REALTIME = "tiny"
INIT_REALTIME_PROCESSING_PAUSE = 0.2
INIT_SILERO_SENSITIVITY = 0.4
INIT_WEBRTC_SENSITIVITY = 3
INIT_POST_SPEECH_SILENCE_DURATION = 0.6
INIT_MIN_LENGTH_OF_RECORDING = 0.5
INIT_MIN_GAP_BETWEEN_RECORDINGS = 0
INIT_WAKE_WORDS_SENSITIVITY = 0.6
INIT_PRE_RECORDING_BUFFER_DURATION = 1.0
INIT_WAKE_WORD_ACTIVATION_DELAY = 0.0
INIT_WAKE_WORD_TIMEOUT = 5.0
INIT_WAKE_WORD_BUFFER_DURATION = 0.1
ALLOWED_LATENCY_LIMIT = 10

TIME_SLEEP = 0.02
SAMPLE_RATE = 16000
BUFFER_SIZE = 512
INT16_MAX_ABS_VALUE = 32768.0

INIT_HANDLE_BUFFER_OVERFLOW = False
if platform.system() != 'Darwin':
    INIT_HANDLE_BUFFER_OVERFLOW = True


class AudioToTextRecorder:
    """
    A class responsible for capturing audio from the microphone, detecting
    voice activity, and then transcribing the captured audio using the
    `faster_whisper` model.
    """
    def __init__(self,
                 model: str = INIT_MODEL_TRANSCRIPTION,
                 language: str = "",
                 compute_type: str = "default",
                 input_device_index: Optional[int] = None,
                 gpu_device_index: Union[int, List[int]] = 0,
                 device: str = "cuda",
                 on_recording_start=None,
                 on_recording_stop=None,
                 on_transcription_start=None,
                 ensure_sentence_starting_uppercase=True,
                 ensure_sentence_ends_with_period=True,
                 use_microphone=True,
                 spinner=True,
                 level=logging.WARNING,

                 # Realtime transcription parameters
                 enable_realtime_transcription=False,
                 realtime_model_type=INIT_MODEL_TRANSCRIPTION_REALTIME,
                 realtime_processing_pause=INIT_REALTIME_PROCESSING_PAUSE,
                 on_realtime_transcription_update=None,
                 on_realtime_transcription_stabilized=None,

                 # Voice activation parameters
                 silero_sensitivity: float = INIT_SILERO_SENSITIVITY,
                 silero_use_onnx: bool = False,
                 silero_deactivity_detection: bool = False,
                 webrtc_sensitivity: int = INIT_WEBRTC_SENSITIVITY,
                 post_speech_silence_duration: float = (
                     INIT_POST_SPEECH_SILENCE_DURATION
                 ),
                 min_length_of_recording: float = (
                     INIT_MIN_LENGTH_OF_RECORDING
                 ),
                 min_gap_between_recordings: float = (
                     INIT_MIN_GAP_BETWEEN_RECORDINGS
                 ),
                 pre_recording_buffer_duration: float = (
                     INIT_PRE_RECORDING_BUFFER_DURATION
                 ),
                 on_vad_detect_start=None,
                 on_vad_detect_stop=None,

                 # Wake word parameters
                 wakeword_backend: str = "pvporcupine",
                 openwakeword_model_paths: Optional[str] = None,
                 openwakeword_inference_framework: str = "onnx",
                 wake_words: str = "",
                 wake_words_sensitivity: float = INIT_WAKE_WORDS_SENSITIVITY,
                 wake_word_activation_delay: float = (
                     INIT_WAKE_WORD_ACTIVATION_DELAY
                 ),
                 wake_word_timeout: float = INIT_WAKE_WORD_TIMEOUT,
                 wake_word_buffer_duration: float = INIT_WAKE_WORD_BUFFER_DURATION,
                 on_wakeword_detected=None,
                 on_wakeword_timeout=None,
                 on_wakeword_detection_start=None,
                 on_wakeword_detection_end=None,
                 on_recorded_chunk=None,
                 debug_mode=False,
                 handle_buffer_overflow: bool = INIT_HANDLE_BUFFER_OVERFLOW,
                 beam_size: int = 5,
                 beam_size_realtime: int = 3,
                 buffer_size: int = BUFFER_SIZE,
                 sample_rate: int = SAMPLE_RATE,
                 initial_prompt: Optional[Union[str, Iterable[int]]] = None,
                 suppress_tokens: Optional[List[int]] = [-1],
                 ):
  144. """
  145. Initializes an audio recorder and transcription
  146. and wake word detection.
  147. Args:
  148. - model (str, default="tiny"): Specifies the size of the transcription
  149. model to use or the path to a converted model directory.
  150. Valid options are 'tiny', 'tiny.en', 'base', 'base.en',
  151. 'small', 'small.en', 'medium', 'medium.en', 'large-v1',
  152. 'large-v2'.
  153. If a specific size is provided, the model is downloaded
  154. from the Hugging Face Hub.
  155. - language (str, default=""): Language code for speech-to-text engine.
  156. If not specified, the model will attempt to detect the language
  157. automatically.
  158. - compute_type (str, default="default"): Specifies the type of
  159. computation to be used for transcription.
  160. See https://opennmt.net/CTranslate2/quantization.html.
  161. - input_device_index (int, default=0): The index of the audio input
  162. device to use.
  163. - gpu_device_index (int, default=0): Device ID to use.
  164. The model can also be loaded on multiple GPUs by passing a list of
  165. IDs (e.g. [0, 1, 2, 3]). In that case, multiple transcriptions can
  166. run in parallel when transcribe() is called from multiple Python
  167. threads
  168. - device (str, default="cuda"): Device for model to use. Can either be
  169. "cuda" or "cpu".
  170. - on_recording_start (callable, default=None): Callback function to be
  171. called when recording of audio to be transcripted starts.
  172. - on_recording_stop (callable, default=None): Callback function to be
  173. called when recording of audio to be transcripted stops.
  174. - on_transcription_start (callable, default=None): Callback function
  175. to be called when transcription of audio to text starts.
  176. - ensure_sentence_starting_uppercase (bool, default=True): Ensures
  177. that every sentence detected by the algorithm starts with an
  178. uppercase letter.
  179. - ensure_sentence_ends_with_period (bool, default=True): Ensures that
  180. every sentence that doesn't end with punctuation such as "?", "!"
  181. ends with a period
  182. - use_microphone (bool, default=True): Specifies whether to use the
  183. microphone as the audio input source. If set to False, the
  184. audio input source will be the audio data sent through the
  185. feed_audio() method.
  186. - spinner (bool, default=True): Show spinner animation with current
  187. state.
  188. - level (int, default=logging.WARNING): Logging level.
  189. - enable_realtime_transcription (bool, default=False): Enables or
  190. disables real-time transcription of audio. When set to True, the
  191. audio will be transcribed continuously as it is being recorded.
  192. - realtime_model_type (str, default="tiny"): Specifies the machine
  193. learning model to be used for real-time transcription. Valid
  194. options include 'tiny', 'tiny.en', 'base', 'base.en', 'small',
  195. 'small.en', 'medium', 'medium.en', 'large-v1', 'large-v2'.
  196. - realtime_processing_pause (float, default=0.1): Specifies the time
  197. interval in seconds after a chunk of audio gets transcribed. Lower
  198. values will result in more "real-time" (frequent) transcription
  199. updates but may increase computational load.
  200. - on_realtime_transcription_update = A callback function that is
  201. triggered whenever there's an update in the real-time
  202. transcription. The function is called with the newly transcribed
  203. text as its argument.
  204. - on_realtime_transcription_stabilized = A callback function that is
  205. triggered when the transcribed text stabilizes in quality. The
  206. stabilized text is generally more accurate but may arrive with a
  207. slight delay compared to the regular real-time updates.
  208. - silero_sensitivity (float, default=SILERO_SENSITIVITY): Sensitivity
  209. for the Silero Voice Activity Detection model ranging from 0
  210. (least sensitive) to 1 (most sensitive). Default is 0.5.
  211. - silero_use_onnx (bool, default=False): Enables usage of the
  212. pre-trained model from Silero in the ONNX (Open Neural Network
  213. Exchange) format instead of the PyTorch format. This is
  214. recommended for faster performance.
  215. - silero_deactivity_detection (bool, default=False): Enables the Silero
  216. model for end-of-speech detection. More robust against background
  217. noise. Utilizes additional GPU resources but improves accuracy in
  218. noisy environments. When False, uses the default WebRTC VAD,
  219. which is more sensitive but may continue recording longer due
  220. to background sounds.
  221. - webrtc_sensitivity (int, default=WEBRTC_SENSITIVITY): Sensitivity
  222. for the WebRTC Voice Activity Detection engine ranging from 0
  223. (least aggressive / most sensitive) to 3 (most aggressive,
  224. least sensitive). Default is 3.
  225. - post_speech_silence_duration (float, default=0.2): Duration in
  226. seconds of silence that must follow speech before the recording
  227. is considered to be completed. This ensures that any brief
  228. pauses during speech don't prematurely end the recording.
  229. - min_gap_between_recordings (float, default=1.0): Specifies the
  230. minimum time interval in seconds that should exist between the
  231. end of one recording session and the beginning of another to
  232. prevent rapid consecutive recordings.
  233. - min_length_of_recording (float, default=1.0): Specifies the minimum
  234. duration in seconds that a recording session should last to ensure
  235. meaningful audio capture, preventing excessively short or
  236. fragmented recordings.
  237. - pre_recording_buffer_duration (float, default=0.2): Duration in
  238. seconds for the audio buffer to maintain pre-roll audio
  239. (compensates speech activity detection latency)
  240. - on_vad_detect_start (callable, default=None): Callback function to
  241. be called when the system listens for voice activity.
  242. - on_vad_detect_stop (callable, default=None): Callback function to be
  243. called when the system stops listening for voice activity.
  244. - wakeword_backend (str, default="pvporcupine"): Specifies the backend
  245. library to use for wake word detection. Supported options include
  246. 'pvporcupine' for using the Porcupine wake word engine or 'oww' for
  247. using the OpenWakeWord engine.
  248. - openwakeword_model_paths (str, default=None): Comma-separated paths
  249. to model files for the openwakeword library. These paths point to
  250. custom models that can be used for wake word detection when the
  251. openwakeword library is selected as the wakeword_backend.
  252. - openwakeword_inference_framework (str, default="onnx"): Specifies
  253. the inference framework to use with the openwakeword library.
  254. Can be either 'onnx' for Open Neural Network Exchange format
  255. or 'tflite' for TensorFlow Lite.
  256. - wake_words (str, default=""): Comma-separated string of wake words to
  257. initiate recording when using the 'pvporcupine' wakeword backend.
  258. Supported wake words include: 'alexa', 'americano', 'blueberry',
  259. 'bumblebee', 'computer', 'grapefruits', 'grasshopper', 'hey google',
  260. 'hey siri', 'jarvis', 'ok google', 'picovoice', 'porcupine',
  261. 'terminator'. For the 'openwakeword' backend, wake words are
  262. automatically extracted from the provided model files, so specifying
  263. them here is not necessary.
  264. - wake_words_sensitivity (float, default=0.5): Sensitivity for wake
  265. word detection, ranging from 0 (least sensitive) to 1 (most
  266. sensitive). Default is 0.5.
  267. - wake_word_activation_delay (float, default=0): Duration in seconds
  268. after the start of monitoring before the system switches to wake
  269. word activation if no voice is initially detected. If set to
  270. zero, the system uses wake word activation immediately.
  271. - wake_word_timeout (float, default=5): Duration in seconds after a
  272. wake word is recognized. If no subsequent voice activity is
  273. detected within this window, the system transitions back to an
  274. inactive state, awaiting the next wake word or voice activation.
  275. - wake_word_buffer_duration (float, default=0.1): Duration in seconds
  276. to buffer audio data during wake word detection. This helps in
  277. cutting out the wake word from the recording buffer so it does not
  278. falsely get detected along with the following spoken text, ensuring
  279. cleaner and more accurate transcription start triggers.
  280. Increase this if parts of the wake word get detected as text.
  281. - on_wakeword_detected (callable, default=None): Callback function to
  282. be called when a wake word is detected.
  283. - on_wakeword_timeout (callable, default=None): Callback function to
  284. be called when the system goes back to an inactive state after when
  285. no speech was detected after wake word activation
  286. - on_wakeword_detection_start (callable, default=None): Callback
  287. function to be called when the system starts to listen for wake
  288. words
  289. - on_wakeword_detection_end (callable, default=None): Callback
  290. function to be called when the system stops to listen for
  291. wake words (e.g. because of timeout or wake word detected)
  292. - on_recorded_chunk (callable, default=None): Callback function to be
  293. called when a chunk of audio is recorded. The function is called
  294. with the recorded audio chunk as its argument.
  295. - debug_mode (bool, default=False): If set to True, the system will
  296. print additional debug information to the console.
  297. - handle_buffer_overflow (bool, default=True): If set to True, the system
  298. will log a warning when an input overflow occurs during recording and
  299. remove the data from the buffer.
  300. - beam_size (int, default=5): The beam size to use for beam search
  301. decoding.
  302. - beam_size_realtime (int, default=3): The beam size to use for beam
  303. search decoding in the real-time transcription model.
  304. - buffer_size (int, default=512): The buffer size to use for audio
  305. recording. Changing this may break functionality.
  306. - sample_rate (int, default=16000): The sample rate to use for audio
  307. recording. Changing this will very probably functionality (as the
  308. WebRTC VAD model is very sensitive towards the sample rate).
  309. - initial_prompt (str or iterable of int, default=None): Initial
  310. prompt to be fed to the transcription models.
  311. - suppress_tokens (list of int, default=[-1]): Tokens to be suppressed
  312. from the transcription output.
  313. Raises:
  314. Exception: Errors related to initializing transcription
  315. model, wake word detection, or audio recording.
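
        Example:
            A minimal construction sketch (argument values are illustrative,
            not recommendations):

                recorder = AudioToTextRecorder(
                    model="tiny",
                    language="en",
                    wake_words="jarvis",
                    enable_realtime_transcription=False,
                )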
  316. """
        self.language = language
        self.compute_type = compute_type
        self.input_device_index = input_device_index
        self.gpu_device_index = gpu_device_index
        self.device = device
        self.wake_words = wake_words
        self.wake_word_activation_delay = wake_word_activation_delay
        self.wake_word_timeout = wake_word_timeout
        self.wake_word_buffer_duration = wake_word_buffer_duration
        self.ensure_sentence_starting_uppercase = (
            ensure_sentence_starting_uppercase
        )
        self.ensure_sentence_ends_with_period = (
            ensure_sentence_ends_with_period
        )
        self.use_microphone = mp.Value(c_bool, use_microphone)
        self.min_gap_between_recordings = min_gap_between_recordings
        self.min_length_of_recording = min_length_of_recording
        self.pre_recording_buffer_duration = pre_recording_buffer_duration
        self.post_speech_silence_duration = post_speech_silence_duration
        self.on_recording_start = on_recording_start
        self.on_recording_stop = on_recording_stop
        self.on_wakeword_detected = on_wakeword_detected
        self.on_wakeword_timeout = on_wakeword_timeout
        self.on_vad_detect_start = on_vad_detect_start
        self.on_vad_detect_stop = on_vad_detect_stop
        self.on_wakeword_detection_start = on_wakeword_detection_start
        self.on_wakeword_detection_end = on_wakeword_detection_end
        self.on_recorded_chunk = on_recorded_chunk
        self.on_transcription_start = on_transcription_start
        self.enable_realtime_transcription = enable_realtime_transcription
        self.realtime_model_type = realtime_model_type
        self.realtime_processing_pause = realtime_processing_pause
        self.on_realtime_transcription_update = (
            on_realtime_transcription_update
        )
        self.on_realtime_transcription_stabilized = (
            on_realtime_transcription_stabilized
        )
        self.debug_mode = debug_mode
        self.handle_buffer_overflow = handle_buffer_overflow
        self.beam_size = beam_size
        self.beam_size_realtime = beam_size_realtime
        self.allowed_latency_limit = ALLOWED_LATENCY_LIMIT

        self.level = level
        self.audio_queue = mp.Queue()
        self.buffer_size = buffer_size
        self.sample_rate = sample_rate
        self.recording_start_time = 0
        self.recording_stop_time = 0
        self.wake_word_detect_time = 0
        self.silero_check_time = 0
        self.silero_working = False
        self.speech_end_silence_start = 0
        self.silero_sensitivity = silero_sensitivity
        self.silero_deactivity_detection = silero_deactivity_detection
        self.listen_start = 0
        self.spinner = spinner
        self.halo = None
        self.state = "inactive"
        self.wakeword_detected = False
        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.is_webrtc_speech_active = False
        self.is_silero_speech_active = False
        self.recording_thread = None
        self.realtime_thread = None
        self.audio_interface = None
        self.audio = None
        self.stream = None
        self.start_recording_event = threading.Event()
        self.stop_recording_event = threading.Event()
        self.last_transcription_bytes = None
        self.initial_prompt = initial_prompt
        self.suppress_tokens = suppress_tokens
        self.use_wake_words = wake_words or wakeword_backend in {
            'oww', 'openwakeword', 'openwakewords'
        }
        # Initialize the logging configuration with the specified level
        log_format = 'RealTimeSTT: %(name)s - %(levelname)s - %(message)s'

        # Create a logger
        logger = logging.getLogger()
        logger.setLevel(level)  # Set the root logger's level

        # Create a file handler and set its level
        file_handler = logging.FileHandler('realtimesst.log')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(log_format))

        # Create a console handler and set its level
        console_handler = logging.StreamHandler()
        console_handler.setLevel(level)
        console_handler.setFormatter(logging.Formatter(log_format))

        # Add the handlers to the logger
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)

        self.is_shut_down = False
        self.shutdown_event = mp.Event()

        try:
            logging.debug("Explicitly setting the multiprocessing start "
                          "method to 'spawn'")
            mp.set_start_method('spawn')
        except RuntimeError as e:
            logging.debug(f"Start method has already been set. Details: {e}")

        logging.info("Starting RealTimeSTT")

        self.interrupt_stop_event = mp.Event()
        self.was_interrupted = mp.Event()
        self.main_transcription_ready_event = mp.Event()
        self.parent_transcription_pipe, child_transcription_pipe = mp.Pipe()
        # Set device for model
        self.device = ("cuda" if self.device == "cuda"
                       and torch.cuda.is_available() else "cpu")

        self.transcript_process = self._start_thread(
            target=AudioToTextRecorder._transcription_worker,
            args=(
                child_transcription_pipe,
                model,
                self.compute_type,
                self.gpu_device_index,
                self.device,
                self.main_transcription_ready_event,
                self.shutdown_event,
                self.interrupt_stop_event,
                self.beam_size,
                self.initial_prompt,
                self.suppress_tokens
            )
        )
        # Start audio data reading process
        if self.use_microphone.value:
            logging.info("Initializing audio recording"
                         " (creating pyAudio input stream,"
                         f" sample rate: {self.sample_rate},"
                         f" buffer size: {self.buffer_size})"
                         )
            self.reader_process = self._start_thread(
                target=AudioToTextRecorder._audio_data_worker,
                args=(
                    self.audio_queue,
                    self.sample_rate,
                    self.buffer_size,
                    self.input_device_index,
                    self.shutdown_event,
                    self.interrupt_stop_event,
                    self.use_microphone
                )
            )
        # Initialize the realtime transcription model
        if self.enable_realtime_transcription:
            try:
                logging.info("Initializing faster_whisper realtime "
                             f"transcription model {self.realtime_model_type}"
                             )
                self.realtime_model_type = faster_whisper.WhisperModel(
                    model_size_or_path=self.realtime_model_type,
                    device=self.device,
                    compute_type=self.compute_type,
                    device_index=self.gpu_device_index
                )

            except Exception as e:
                logging.exception("Error initializing faster_whisper "
                                  f"realtime transcription model: {e}"
                                  )
                raise

            logging.debug("Faster_whisper realtime speech to text "
                          "transcription model initialized successfully")
        # Setup wake word detection
        if wake_words or wakeword_backend in {'oww', 'openwakeword', 'openwakewords'}:
            self.wakeword_backend = wakeword_backend

            self.wake_words_list = [
                word.strip() for word in wake_words.lower().split(',')
            ]
            self.wake_words_sensitivity = wake_words_sensitivity
            self.wake_words_sensitivities = [
                float(wake_words_sensitivity)
                for _ in range(len(self.wake_words_list))
            ]

            if self.wakeword_backend in {'pvp', 'pvporcupine'}:
                try:
                    self.porcupine = pvporcupine.create(
                        keywords=self.wake_words_list,
                        sensitivities=self.wake_words_sensitivities
                    )
                    self.buffer_size = self.porcupine.frame_length
                    self.sample_rate = self.porcupine.sample_rate

                except Exception as e:
                    logging.exception(
                        "Error initializing porcupine "
                        f"wake word detection engine: {e}"
                    )
                    raise

                logging.debug(
                    "Porcupine wake word detection engine initialized successfully"
                )

            elif self.wakeword_backend in {'oww', 'openwakeword', 'openwakewords'}:
                openwakeword.utils.download_models()

                try:
                    if openwakeword_model_paths:
                        model_paths = openwakeword_model_paths.split(',')
                        self.owwModel = Model(
                            wakeword_models=model_paths,
                            inference_framework=openwakeword_inference_framework
                        )
                        logging.info(
                            "Successfully loaded wakeword model(s): "
                            f"{openwakeword_model_paths}"
                        )
                    else:
                        self.owwModel = Model(
                            inference_framework=openwakeword_inference_framework)

                    self.oww_n_models = len(self.owwModel.models.keys())
                    if not self.oww_n_models:
                        logging.error(
                            "No wake word models loaded."
                        )

                    for model_key in self.owwModel.models.keys():
                        logging.info(
                            "Successfully loaded openwakeword model: "
                            f"{model_key}"
                        )

                except Exception as e:
                    logging.exception(
                        "Error initializing openwakeword "
                        f"wake word detection engine: {e}"
                    )
                    raise

                logging.debug(
                    "Open wake word detection engine initialized successfully"
                )

            else:
                logging.exception(f"Wakeword engine {self.wakeword_backend} "
                                  "unknown/unsupported. Please specify one of: "
                                  "pvporcupine, openwakeword.")
        # Setup voice activity detection model WebRTC
        try:
            logging.info("Initializing WebRTC voice activity detection "
                         f"engine with sensitivity {webrtc_sensitivity}"
                         )
            self.webrtc_vad_model = webrtcvad.Vad()
            self.webrtc_vad_model.set_mode(webrtc_sensitivity)

        except Exception as e:
            logging.exception("Error initializing WebRTC voice "
                              f"activity detection engine: {e}"
                              )
            raise

        logging.debug("WebRTC VAD voice activity detection "
                      "engine initialized successfully"
                      )
        # Setup voice activity detection model Silero VAD
        try:
            self.silero_vad_model, _ = torch.hub.load(
                repo_or_dir="snakers4/silero-vad",
                model="silero_vad",
                verbose=False,
                onnx=silero_use_onnx
            )

        except Exception as e:
            logging.exception(f"Error initializing Silero VAD "
                              f"voice activity detection engine: {e}"
                              )
            raise

        logging.debug("Silero VAD voice activity detection "
                      "engine initialized successfully"
                      )
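        # Sizing note (sketch of the arithmetic below): with the defaults of
        # 16000 Hz sample rate, 512-sample chunks and a 1.0 s pre-recording
        # buffer, the deque keeps 16000 // 512 * 1.0 = 31 chunks of pre-roll.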
        self.audio_buffer = collections.deque(
            maxlen=int((self.sample_rate // self.buffer_size) *
                       self.pre_recording_buffer_duration)
        )
        self.frames = []

        # Recording control flags
        self.is_recording = False
        self.is_running = True
        self.start_recording_on_voice_activity = False
        self.stop_recording_on_voice_deactivity = False

        # Start the recording worker thread
        self.recording_thread = threading.Thread(target=self._recording_worker)
        self.recording_thread.daemon = True
        self.recording_thread.start()

        # Start the realtime transcription worker thread
        self.realtime_thread = threading.Thread(target=self._realtime_worker)
        self.realtime_thread.daemon = True
        self.realtime_thread.start()

        # Wait for transcription models to start
        logging.debug('Waiting for main transcription model to start')
        self.main_transcription_ready_event.wait()
        logging.debug('Main transcription model ready')

        logging.debug('RealtimeSTT initialization completed successfully')
    def _start_thread(self, target=None, args=()):
        """
        Implement a consistent threading model across the library.

        This method is used to start any thread in this library. It uses the
        standard threading.Thread on Linux and the PyTorch multiprocessing
        Process on all other platforms.

        Args:
            target (callable object): is the callable object to be invoked by
              the run() method. Defaults to None, meaning nothing is called.
            args (tuple): is a list or tuple of arguments for the target
              invocation. Defaults to ().
        """
        if (platform.system() == 'Linux'):
            thread = threading.Thread(target=target, args=args)
            thread.daemon = True
            thread.start()
            return thread
        else:
            thread = mp.Process(target=target, args=args)
            thread.start()
            return thread
    @staticmethod
    def _transcription_worker(conn,
                              model_path,
                              compute_type,
                              gpu_device_index,
                              device,
                              ready_event,
                              shutdown_event,
                              interrupt_stop_event,
                              beam_size,
                              initial_prompt,
                              suppress_tokens
                              ):
        """
        Worker method that handles the continuous
        process of transcribing audio data.

        This method runs in a separate process and is responsible for:
        - Initializing the `faster_whisper` model used for transcription.
        - Receiving audio data sent through a pipe and using the model
          to transcribe it.
        - Sending transcription results back through the pipe.
        - Continuously checking for a shutdown event to gracefully
          terminate the transcription process.

        Args:
            conn (multiprocessing.Connection): The connection endpoint used
              for receiving audio data and sending transcription results.
            model_path (str): The path to the pre-trained faster_whisper model
              for transcription.
            compute_type (str): Specifies the type of computation to be used
              for transcription.
            gpu_device_index (int): Device ID to use.
            device (str): Device for model to use.
            ready_event (threading.Event): An event that is set when the
              transcription model is successfully initialized and ready.
            shutdown_event (threading.Event): An event that, when set,
              signals this worker method to terminate.
            interrupt_stop_event (threading.Event): An event that, when set,
              signals this worker method to stop processing audio data.
            beam_size (int): The beam size to use for beam search decoding.
            initial_prompt (str or iterable of int): Initial prompt to be fed
              to the transcription model.
            suppress_tokens (list of int): Tokens to be suppressed from the
              transcription output.

        Raises:
            Exception: If there is an error while initializing the
            transcription model.
        """
        logging.info("Initializing faster_whisper "
                     f"main transcription model {model_path}"
                     )

        try:
            model = faster_whisper.WhisperModel(
                model_size_or_path=model_path,
                device=device,
                compute_type=compute_type,
                device_index=gpu_device_index,
            )

        except Exception as e:
            logging.exception("Error initializing main "
                              f"faster_whisper transcription model: {e}"
                              )
            raise

        ready_event.set()

        logging.debug("Faster_whisper main speech to text "
                      "transcription model initialized successfully"
                      )

        while not shutdown_event.is_set():
            try:
                if conn.poll(0.5):
                    audio, language = conn.recv()
                    try:
                        segments = model.transcribe(
                            audio,
                            language=language if language else None,
                            beam_size=beam_size,
                            initial_prompt=initial_prompt,
                            suppress_tokens=suppress_tokens
                        )
                        segments = segments[0]
                        transcription = " ".join(seg.text for seg in segments)
                        transcription = transcription.strip()
                        conn.send(('success', transcription))
                    except Exception as e:
                        logging.error(f"General transcription error: {e}")
                        conn.send(('error', str(e)))
                else:
                    # If there's no data, sleep / prevent busy waiting
                    time.sleep(0.02)

            except KeyboardInterrupt:
                interrupt_stop_event.set()
                logging.debug("Transcription worker process "
                              "finished due to KeyboardInterrupt"
                              )
                break
    @staticmethod
    def _audio_data_worker(audio_queue,
                           sample_rate,
                           buffer_size,
                           input_device_index,
                           shutdown_event,
                           interrupt_stop_event,
                           use_microphone):
        """
        Worker method that handles the audio recording process.

        This method runs in a separate process and is responsible for:
        - Setting up the audio input stream for recording.
        - Continuously reading audio data from the input stream
          and placing it in a queue.
        - Handling errors during the recording process, including
          input overflow.
        - Gracefully terminating the recording process when a shutdown
          event is set.

        Args:
            audio_queue (queue.Queue): A queue where recorded audio
              data is placed.
            sample_rate (int): The sample rate of the audio input stream.
            buffer_size (int): The size of the buffer used in the audio
              input stream.
            input_device_index (int): The index of the audio input device.
            shutdown_event (threading.Event): An event that, when set, signals
              this worker method to terminate.
            interrupt_stop_event (threading.Event): An event that is set when
              recording is interrupted by a KeyboardInterrupt.
            use_microphone (multiprocessing.Value): Flag indicating whether
              captured audio should be forwarded to the queue.

        Raises:
            Exception: If there is an error while initializing the audio
            recording.
        """
        try:
            audio_interface = pyaudio.PyAudio()
            if input_device_index is None:
                default_device = audio_interface.get_default_input_device_info()
                input_device_index = default_device['index']
            stream = audio_interface.open(
                rate=sample_rate,
                format=pyaudio.paInt16,
                channels=1,
                input=True,
                frames_per_buffer=buffer_size,
                input_device_index=input_device_index,
            )

        except Exception as e:
            logging.exception("Error initializing pyaudio "
                              f"audio recording: {e}"
                              )
            raise

        logging.debug("Audio recording (pyAudio input "
                      "stream) initialized successfully"
                      )

        try:
            while not shutdown_event.is_set():
                try:
                    data = stream.read(buffer_size)

                except OSError as e:
                    if e.errno == pyaudio.paInputOverflowed:
                        logging.warning("Input overflowed. Frame dropped.")
                    else:
                        logging.error(f"Error during recording: {e}")
                        tb_str = traceback.format_exc()
                        print(f"Traceback: {tb_str}")
                        print(f"Error: {e}")
                    continue

                except Exception as e:
                    logging.error(f"Error during recording: {e}")
                    tb_str = traceback.format_exc()
                    print(f"Traceback: {tb_str}")
                    print(f"Error: {e}")
                    continue

                if use_microphone.value:
                    audio_queue.put(data)

        except KeyboardInterrupt:
            interrupt_stop_event.set()
            logging.debug("Audio data worker process "
                          "finished due to KeyboardInterrupt"
                          )

        finally:
            stream.stop_stream()
            stream.close()
            audio_interface.terminate()
    def wakeup(self):
        """
        If in wake-word mode, wake up as if a wake word had been spoken.
        """
        self.listen_start = time.time()
    def abort(self):
        """
        Aborts any ongoing listening or recording and waits until the
        interrupt has been acknowledged.
        """
        self.start_recording_on_voice_activity = False
        self.stop_recording_on_voice_deactivity = False
        self._set_state("inactive")
        self.interrupt_stop_event.set()
        self.was_interrupted.wait()
        self.was_interrupted.clear()
    def wait_audio(self):
        """
        Waits for the start and completion of the audio recording process.

        This method is responsible for:
        - Waiting for voice activity to begin recording if not yet started.
        - Waiting for voice inactivity to complete the recording.
        - Setting the audio buffer from the recorded frames.
        - Resetting recording-related attributes.

        Side effects:
        - Updates the state of the instance.
        - Modifies the audio attribute to contain the processed audio data.
        """
        self.listen_start = time.time()

        # If not yet started recording, wait for voice activity to initiate.
        if not self.is_recording and not self.frames:
            self._set_state("listening")
            self.start_recording_on_voice_activity = True

            # Wait until recording starts
            while not self.interrupt_stop_event.is_set():
                if self.start_recording_event.wait(timeout=0.02):
                    break

        # If recording is ongoing, wait for voice inactivity
        # to finish recording.
        if self.is_recording:
            self.stop_recording_on_voice_deactivity = True

            # Wait until recording stops
            while not self.interrupt_stop_event.is_set():
                if (self.stop_recording_event.wait(timeout=0.02)):
                    break

        # Convert recorded frames to the appropriate audio format.
        audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)
        self.audio = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE
        self.frames.clear()

        # Reset recording-related timestamps
        self.recording_stop_time = 0
        self.listen_start = 0

        self._set_state("inactive")
    def transcribe(self):
        """
        Transcribes audio captured by this class instance using the
        `faster_whisper` model.

        Automatically starts recording upon voice activity if not manually
        started using `recorder.start()`.
        Automatically stops recording upon voice deactivity if not manually
        stopped with `recorder.stop()`.
        Processes the recorded audio to generate transcription.

        Returns:
            str: The transcription of the recorded audio.

        Raises:
            Exception: If there is an error during the transcription process.
        """
        self._set_state("transcribing")
        audio_copy = copy.deepcopy(self.audio)
        self.parent_transcription_pipe.send((self.audio, self.language))
        status, result = self.parent_transcription_pipe.recv()
        self._set_state("inactive")
        if status == 'success':
            self.last_transcription_bytes = audio_copy
            return self._preprocess_output(result)
        else:
            logging.error(result)
            raise Exception(result)
    def _process_wakeword(self, data):
        """
        Processes audio data to detect wake words.
        """
        if self.wakeword_backend in {'pvp', 'pvporcupine'}:
            pcm = struct.unpack_from(
                "h" * self.buffer_size,
                data
            )
            porcupine_index = self.porcupine.process(pcm)
            if self.debug_mode:
                print(f"wake words porcupine_index: {porcupine_index}")
            return porcupine_index

        elif self.wakeword_backend in {'oww', 'openwakeword', 'openwakewords'}:
            pcm = np.frombuffer(data, dtype=np.int16)
            prediction = self.owwModel.predict(pcm)
            max_score = -1
            max_index = -1
            wake_words_in_prediction = len(self.owwModel.prediction_buffer.keys())
            if wake_words_in_prediction:
                for idx, mdl in enumerate(self.owwModel.prediction_buffer.keys()):
                    scores = list(self.owwModel.prediction_buffer[mdl])
                    if scores[-1] >= self.wake_words_sensitivity \
                            and scores[-1] > max_score:
                        max_score = scores[-1]
                        max_index = idx
                if self.debug_mode:
                    print(f"wake words oww max_index, max_score: "
                          f"{max_index} {max_score}")
                return max_index
            else:
                if self.debug_mode:
                    print("wake words oww_index: -1")
                return -1

        if self.debug_mode:
            print("wake words no match")
        return -1
    def text(self,
             on_transcription_finished=None,
             ):
        """
        Transcribes audio captured by this class instance
        using the `faster_whisper` model.

        - Automatically starts recording upon voice activity if not manually
          started using `recorder.start()`.
        - Automatically stops recording upon voice deactivity if not manually
          stopped with `recorder.stop()`.
        - Processes the recorded audio to generate transcription.

        Args:
            on_transcription_finished (callable, optional): Callback function
              to be executed when transcription is ready.
              If provided, transcription will be performed asynchronously, and
              the callback will receive the transcription as its argument.
              If omitted, the transcription will be performed synchronously,
              and the result will be returned.

        Returns (if no callback is set):
            str: The transcription of the recorded audio.
        """
        self.interrupt_stop_event.clear()
        self.was_interrupted.clear()

        self.wait_audio()

        if self.is_shut_down or self.interrupt_stop_event.is_set():
            if self.interrupt_stop_event.is_set():
                self.was_interrupted.set()
            return ""

        if on_transcription_finished:
            threading.Thread(target=on_transcription_finished,
                             args=(self.transcribe(),)).start()
        else:
            return self.transcribe()
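
    # Usage sketch for text() (illustrative): the synchronous call blocks and
    # returns the transcription, while passing a callable delivers the result
    # to that callback instead.
    #
    #     print(recorder.text())                          # blocking call
    #     recorder.text(lambda t: print("heard:", t))     # callback style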
    def start(self):
        """
        Starts recording audio directly without waiting for voice activity.
        """
        # Ensure there's a minimum interval
        # between stopping and starting recording
        if (time.time() - self.recording_stop_time
                < self.min_gap_between_recordings):
            logging.info("Attempted to start recording "
                         "too soon after stopping."
                         )
            return self

        logging.info("recording started")
        self._set_state("recording")
        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.wakeword_detected = False
        self.wake_word_detect_time = 0
        self.frames = []
        self.is_recording = True
        self.recording_start_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False
        self.stop_recording_event.clear()
        self.start_recording_event.set()

        if self.on_recording_start:
            self.on_recording_start()

        return self
    def stop(self):
        """
        Stops recording audio.
        """
        # Ensure there's a minimum interval
        # between starting and stopping recording
        if (time.time() - self.recording_start_time
                < self.min_length_of_recording):
            logging.info("Attempted to stop recording "
                         "too soon after starting."
                         )
            return self

        logging.info("recording stopped")
        self.is_recording = False
        self.recording_stop_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False
        self.silero_check_time = 0
        self.start_recording_event.clear()
        self.stop_recording_event.set()

        if self.on_recording_stop:
            self.on_recording_stop()

        return self
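
    # Manual control sketch (timing is illustrative): start() and stop() can
    # frame a fixed recording window, after which text() converts the captured
    # frames and transcribes them.
    #
    #     recorder.start()
    #     time.sleep(3)
    #     recorder.stop()
    #     print(recorder.text())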
    def feed_audio(self, chunk, original_sample_rate=16000):
        """
        Feed an audio chunk into the processing pipeline. Chunks are
        accumulated until the buffer size is reached, and then the accumulated
        data is fed into the audio_queue.
        """
        # Check if the buffer attribute exists, if not, initialize it
        if not hasattr(self, 'buffer'):
            self.buffer = bytearray()

        # Check if input is a NumPy array
        if isinstance(chunk, np.ndarray):
            # Handle stereo to mono conversion if necessary
            if chunk.ndim == 2:
                chunk = np.mean(chunk, axis=1)

            # Resample to 16000 Hz if necessary
            if original_sample_rate != 16000:
                num_samples = int(len(chunk) * 16000 / original_sample_rate)
                chunk = resample(chunk, num_samples)

            # Ensure data type is int16
            chunk = chunk.astype(np.int16)

            # Convert the NumPy array to bytes
            chunk = chunk.tobytes()

        # Append the chunk to the buffer
        self.buffer += chunk
        buf_size = 2 * self.buffer_size  # silero complains if too short

        # Check if the buffer has reached or exceeded the buffer_size
        while len(self.buffer) >= buf_size:
            # Extract self.buffer_size amount of data from the buffer
            to_process = self.buffer[:buf_size]
            self.buffer = self.buffer[buf_size:]

            # Feed the extracted data to the audio_queue
            self.audio_queue.put(to_process)
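
    # Feeding external audio (sketch): with use_microphone=False, raw int16
    # bytes or NumPy arrays can be pushed in; stereo arrays are downmixed and
    # resampled to 16 kHz as needed. The silent chunk below is illustrative.
    #
    #     recorder = AudioToTextRecorder(use_microphone=False)
    #     recorder.feed_audio(np.zeros(1600, dtype=np.int16))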
    def set_microphone(self, microphone_on=True):
        """
        Set the microphone on or off.
        """
        logging.info("Setting microphone to: " + str(microphone_on))
        self.use_microphone.value = microphone_on
    def shutdown(self):
        """
        Safely shuts down the audio recording by stopping the
        recording worker and closing the audio stream.
        """
        # Force wait_audio() and text() to exit
        self.is_shut_down = True
        self.start_recording_event.set()
        self.stop_recording_event.set()

        self.shutdown_event.set()
        self.is_recording = False
        self.is_running = False

        logging.debug('Finishing recording thread')
        if self.recording_thread:
            self.recording_thread.join()

        logging.debug('Terminating reader process')

        # Give it some time to finish the loop and cleanup.
        if self.use_microphone.value:
            self.reader_process.join(timeout=10)

            if self.reader_process.is_alive():
                logging.warning("Reader process did not terminate "
                                "in time. Terminating forcefully."
                                )
                self.reader_process.terminate()

        logging.debug('Terminating transcription process')
        self.transcript_process.join(timeout=10)

        if self.transcript_process.is_alive():
            logging.warning("Transcript process did not terminate "
                            "in time. Terminating forcefully."
                            )
            self.transcript_process.terminate()

        self.parent_transcription_pipe.close()

        logging.debug('Finishing realtime thread')
        if self.realtime_thread:
            self.realtime_thread.join()

        if self.enable_realtime_transcription:
            if self.realtime_model_type:
                del self.realtime_model_type
                self.realtime_model_type = None
        gc.collect()
  1074. def _recording_worker(self):
  1075. """
  1076. The main worker method which constantly monitors the audio
  1077. input for voice activity and accordingly starts/stops the recording.
  1078. """
  1079. logging.debug('Starting recording worker')
  1080. try:
  1081. was_recording = False
  1082. delay_was_passed = False
  1083. # Continuously monitor audio for voice activity
  1084. while self.is_running:
  1085. try:
  1086. data = self.audio_queue.get()
  1087. if self.on_recorded_chunk:
  1088. self.on_recorded_chunk(data)
  1089. if self.handle_buffer_overflow:
  1090. # Handle queue overflow
  1091. if (self.audio_queue.qsize() >
  1092. self.allowed_latency_limit):
  1093. logging.warning("Audio queue size exceeds "
  1094. "latency limit. Current size: "
  1095. f"{self.audio_queue.qsize()}. "
  1096. "Discarding old audio chunks."
  1097. )
  1098. while (self.audio_queue.qsize() >
  1099. self.allowed_latency_limit):
  1100. data = self.audio_queue.get()
  1101. except BrokenPipeError:
  1102. print("BrokenPipeError _recording_worker")
  1103. self.is_running = False
  1104. break
  1105. if not self.is_recording:
  1106. # Handle not recording state
  1107. time_since_listen_start = (time.time() - self.listen_start
  1108. if self.listen_start else 0)
  1109. wake_word_activation_delay_passed = (
  1110. time_since_listen_start >
  1111. self.wake_word_activation_delay
  1112. )
  1113. # Handle wake-word timeout callback
  1114. if wake_word_activation_delay_passed \
  1115. and not delay_was_passed:
  1116. if self.use_wake_words and self.wake_word_activation_delay:
  1117. if self.on_wakeword_timeout:
  1118. self.on_wakeword_timeout()
  1119. delay_was_passed = wake_word_activation_delay_passed
  1120. # Set state and spinner text
  1121. if not self.recording_stop_time:
  1122. if self.use_wake_words \
  1123. and wake_word_activation_delay_passed \
  1124. and not self.wakeword_detected:
  1125. self._set_state("wakeword")
  1126. else:
  1127. if self.listen_start:
  1128. self._set_state("listening")
  1129. else:
  1130. self._set_state("inactive")
  1131. #self.wake_word_detect_time = time.time()
  1132. if self.use_wake_words and wake_word_activation_delay_passed:
  1133. try:
  1134. wakeword_index = self._process_wakeword(data)
  1135. except struct.error:
  1136. logging.error("Error unpacking audio data "
  1137. "for wake word processing.")
  1138. continue
  1139. except Exception as e:
  1140. logging.error(f"Wake word processing error: {e}")
  1141. continue
  1142. # If a wake word is detected
  1143. if wakeword_index >= 0:
  1144. # Removing the wake word from the recording
  1145. samples_time = int(self.sample_rate * self.wake_word_buffer_duration)
  1146. start_index = max(
  1147. 0,
  1148. len(self.audio_buffer) - samples_time
  1149. )
  1150. temp_samples = collections.deque(
  1151. itertools.islice(
  1152. self.audio_buffer,
  1153. start_index,
  1154. None)
  1155. )
  1156. self.audio_buffer.clear()
  1157. self.audio_buffer.extend(temp_samples)
  1158. self.wake_word_detect_time = time.time()
  1159. self.wakeword_detected = True
  1160. #self.wake_word_cooldown_time = time.time()
  1161. if self.on_wakeword_detected:
  1162. self.on_wakeword_detected()
                    # Check for voice activity to
                    # trigger the start of recording
                    if ((not self.use_wake_words
                         or not wake_word_activation_delay_passed)
                            and self.start_recording_on_voice_activity) \
                            or self.wakeword_detected:

                        if self._is_voice_active():
                            logging.info("voice activity detected")

                            self.start()

                            if self.is_recording:
                                self.start_recording_on_voice_activity = False

                                # Add the buffered audio
                                # to the recording frames
                                self.frames.extend(list(self.audio_buffer))
                                self.audio_buffer.clear()

                            self.silero_vad_model.reset_states()
                        else:
                            data_copy = data[:]
                            self._check_voice_activity(data_copy)

                    self.speech_end_silence_start = 0
                else:
                    # If we are currently recording

                    # Stop the recording if silence is detected after speech
                    if self.stop_recording_on_voice_deactivity:
                        is_speech = (
                            self._is_silero_speech(data)
                            if self.silero_deactivity_detection
                            else self._is_webrtc_speech(data, True)
                        )

                        if not is_speech:
                            # Voice deactivity was detected, so we start
                            # measuring silence time before stopping recording
                            if self.speech_end_silence_start == 0:
                                self.speech_end_silence_start = time.time()
                        else:
                            self.speech_end_silence_start = 0

                        # Wait for silence to stop recording after speech
                        if self.speech_end_silence_start and time.time() - \
                                self.speech_end_silence_start > \
                                self.post_speech_silence_duration:
                            logging.info("voice deactivity detected")
                            self.stop()
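
                        # Worked example (illustrative): with a hypothetical
                        # post_speech_silence_duration of 0.7, recording
                        # stops roughly 0.7 s after the last chunk that was
                        # classified as speech.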

                if not self.is_recording and was_recording:
                    # Reset after stopping recording to ensure clean state
                    self.stop_recording_on_voice_deactivity = False

                if time.time() - self.silero_check_time > 0.1:
                    self.silero_check_time = 0
                # Handle wake word timeout (waited too long to initiate
                # speech after wake word detection)
                if self.wake_word_detect_time and time.time() - \
                        self.wake_word_detect_time > self.wake_word_timeout:

                    self.wake_word_detect_time = 0
                    if self.wakeword_detected and self.on_wakeword_timeout:
                        self.on_wakeword_timeout()
                    self.wakeword_detected = False

                was_recording = self.is_recording

                if self.is_recording:
                    self.frames.append(data)

                if not self.is_recording or self.speech_end_silence_start:
                    self.audio_buffer.append(data)

        except Exception as e:
            if not self.interrupt_stop_event.is_set():
                logging.error(f"Unhandled exception in "
                              f"_recording_worker: {e}")
                raise

    def _realtime_worker(self):
        """
        Performs real-time transcription if the feature is enabled.

        The method is responsible for transcribing recorded audio frames
        in real-time, based on the specified resolution interval. The
        transcribed text is stored in `self.realtime_transcription_text`,
        and a callback function is invoked with this text if one is
        specified.
        """
        try:
            logging.debug('Starting realtime worker')

            # Return immediately if real-time transcription is not enabled
            if not self.enable_realtime_transcription:
                return

            # Continue running as long as the main process is active
            while self.is_running:

                # Check if the recording is active
                if self.is_recording:

                    # Sleep for the duration of the transcription resolution
                    time.sleep(self.realtime_processing_pause)

                    # Convert the buffer frames to a NumPy array
                    audio_array = np.frombuffer(
                        b''.join(self.frames),
                        dtype=np.int16
                        )

                    # Normalize the array to a [-1, 1] range
                    audio_array = audio_array.astype(np.float32) / \
                        INT16_MAX_ABS_VALUE

                    # Perform transcription and assemble the text
                    segments = self.realtime_model_type.transcribe(
                        audio_array,
                        language=self.language if self.language else None,
                        beam_size=self.beam_size_realtime,
                        initial_prompt=self.initial_prompt,
                        suppress_tokens=self.suppress_tokens,
                    )

                    # Double-check the recording state,
                    # because it could have changed mid-transcription
                    if self.is_recording and time.time() - \
                            self.recording_start_time > 0.5:

                        logging.debug('Starting realtime transcription')
                        self.realtime_transcription_text = " ".join(
                            seg.text for seg in segments[0]
                        )
                        self.realtime_transcription_text = \
                            self.realtime_transcription_text.strip()

                        self.text_storage.append(
                            self.realtime_transcription_text
                            )

                        # Take the last two texts in storage, if they exist
                        if len(self.text_storage) >= 2:
                            last_two_texts = self.text_storage[-2:]

                            # Find the longest common prefix
                            # between the two texts
                            prefix = os.path.commonprefix(
                                [last_two_texts[0], last_two_texts[1]]
                                )

                            # This prefix is the text that was transcribed
                            # the same way twice in a row.
                            # Store it as "safely detected text".
                            if len(prefix) >= \
                                    len(self.realtime_stabilized_safetext):

                                # Only store when longer than the previous
                                # one, as additional security
                                self.realtime_stabilized_safetext = prefix

                        # Find parts of the stabilized text
                        # in the freshly transcribed text
                        matching_pos = self._find_tail_match_in_text(
                            self.realtime_stabilized_safetext,
                            self.realtime_transcription_text
                            )

                        if matching_pos < 0:
                            if self.realtime_stabilized_safetext:
                                self._on_realtime_transcription_stabilized(
                                    self._preprocess_output(
                                        self.realtime_stabilized_safetext,
                                        True
                                    )
                                )
                            else:
                                self._on_realtime_transcription_stabilized(
                                    self._preprocess_output(
                                        self.realtime_transcription_text,
                                        True
                                    )
                                )
                        else:
                            # We found parts of the stabilized text
                            # in the freshly transcribed text.
                            # We now take the stabilized text and append
                            # only the freshly transcribed tail to it.
                            output_text = self.realtime_stabilized_safetext + \
                                self.realtime_transcription_text[matching_pos:]

                            # This yields the "left" part of the text as
                            # stabilized AND at the same time delivers the
                            # freshly detected parts on the first run,
                            # without the need for two transcriptions
                            self._on_realtime_transcription_stabilized(
                                self._preprocess_output(output_text, True)
                                )

                        # Invoke the callback with the transcribed text
                        self._on_realtime_transcription_update(
                            self._preprocess_output(
                                self.realtime_transcription_text,
                                True
                            )
                        )

                # If not recording, sleep briefly before checking again
                else:
                    time.sleep(TIME_SLEEP)

        except Exception as e:
            logging.error(f"Unhandled exception in _realtime_worker: {e}")
            raise
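
    # Illustrative walk-through of the stabilization logic above, using
    # hypothetical transcription passes (not from the original source):
    #
    #   pass 1: "hello wor"
    #   pass 2: "hello world how"
    #   os.path.commonprefix(["hello wor", "hello world how"]) == "hello wor"
    #
    # "hello wor" was transcribed identically twice in a row, so it is kept
    # as stabilized "safe" text, while the remainder ("ld how") stays
    # provisional until a later pass confirms it.
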
    def _is_silero_speech(self, chunk):
        """
        Returns true if speech is detected in the provided audio data

        Args:
            chunk (bytes): raw bytes of audio data (1024 raw bytes with
            16000 sample rate and 16 bits per sample)
        """
        if self.sample_rate != 16000:
            pcm_data = np.frombuffer(chunk, dtype=np.int16)
            data_16000 = signal.resample_poly(
                pcm_data, 16000, self.sample_rate)
            chunk = data_16000.astype(np.int16).tobytes()

        self.silero_working = True
        audio_chunk = np.frombuffer(chunk, dtype=np.int16)
        audio_chunk = audio_chunk.astype(np.float32) / INT16_MAX_ABS_VALUE
        vad_prob = self.silero_vad_model(
            torch.from_numpy(audio_chunk),
            SAMPLE_RATE).item()
        is_silero_speech_active = vad_prob > (1 - self.silero_sensitivity)
        if is_silero_speech_active:
            self.is_silero_speech_active = True
        self.silero_working = False
        return is_silero_speech_active
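
    # Worked example (illustrative): with silero_sensitivity = 0.6 the
    # threshold above is 1 - 0.6 = 0.4, so any chunk whose Silero VAD
    # probability exceeds 0.4 counts as speech; raising the sensitivity
    # lowers the threshold and makes detection more eager.
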
    def _is_webrtc_speech(self, chunk, all_frames_must_be_true=False):
        """
        Returns true if speech is detected in the provided audio data

        Args:
            chunk (bytes): raw bytes of audio data (1024 raw bytes with
            16000 sample rate and 16 bits per sample)
            all_frames_must_be_true (bool): If True, every 10 ms frame in
            the chunk must be classified as speech for the chunk to count
            as speech.
        """
        if self.sample_rate != 16000:
            pcm_data = np.frombuffer(chunk, dtype=np.int16)
            data_16000 = signal.resample_poly(
                pcm_data, 16000, self.sample_rate)
            chunk = data_16000.astype(np.int16).tobytes()

        # Number of samples in a 10 ms frame at 16 kHz
        frame_length = int(16000 * 0.01)  # 160 samples per 10 ms frame
        num_frames = int(len(chunk) / (2 * frame_length))
        speech_frames = 0

        for i in range(num_frames):
            start_byte = i * frame_length * 2
            end_byte = start_byte + frame_length * 2
            frame = chunk[start_byte:end_byte]
            if self.webrtc_vad_model.is_speech(frame, 16000):
                speech_frames += 1
                if not all_frames_must_be_true:
                    if self.debug_mode:
                        print(f"Speech detected in frame {i + 1}"
                              f" of {num_frames}")
                    return True
        if all_frames_must_be_true:
            if self.debug_mode and speech_frames == num_frames:
                print(f"Speech detected in {speech_frames} of "
                      f"{num_frames} frames")
            elif self.debug_mode:
                print(f"Speech not detected in all {num_frames} frames")
            return speech_frames == num_frames
        else:
            if self.debug_mode:
                print(f"Speech not detected in any of {num_frames} frames")
            return False
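
    # Worked example (illustrative): a 1024-byte chunk holds 512 int16
    # samples; at 160 samples (320 bytes) per 10 ms frame this gives
    # int(1024 / 320) == 3 full frames, so up to 64 trailing bytes of a
    # chunk are never inspected by the WebRTC VAD.
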
    def _check_voice_activity(self, data):
        """
        Initiate check if voice is active based on the provided data.

        Args:
            data: The audio data to be checked for voice activity.
        """
        # Quick first-pass check for voice activity using WebRTC
        self.is_webrtc_speech_active = self._is_webrtc_speech(data)

        if self.is_webrtc_speech_active:

            if not self.silero_working:
                self.silero_working = True

                # Run the more intensive Silero check in a separate thread
                threading.Thread(
                    target=self._is_silero_speech,
                    args=(data,)).start()
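
    # Design note: WebRTC serves as the cheap first-pass gate on the
    # calling thread, while the heavier Silero check runs in a background
    # thread; _is_voice_active below only reports voice once both checks
    # agree.
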
    def _is_voice_active(self):
        """
        Determine if voice is active.

        Returns:
            bool: True if voice is active, False otherwise.
        """
        return self.is_webrtc_speech_active and self.is_silero_speech_active

    def _set_state(self, new_state):
        """
        Update the current state of the recorder and execute
        corresponding state-change callbacks.

        Args:
            new_state (str): The new state to set.
        """
        # Check if the state has actually changed
        if new_state == self.state:
            return

        # Store the current state for later comparison
        old_state = self.state

        # Update to the new state
        self.state = new_state

        # Execute callbacks based on transitioning FROM a particular state
        if old_state == "listening":
            if self.on_vad_detect_stop:
                self.on_vad_detect_stop()
        elif old_state == "wakeword":
            if self.on_wakeword_detection_end:
                self.on_wakeword_detection_end()

        # Execute callbacks based on transitioning TO a particular state
        if new_state == "listening":
            if self.on_vad_detect_start:
                self.on_vad_detect_start()
            self._set_spinner("speak now")
            if self.spinner and self.halo:
                self.halo._interval = 250
        elif new_state == "wakeword":
            if self.on_wakeword_detection_start:
                self.on_wakeword_detection_start()
            self._set_spinner(f"say {self.wake_words}")
            if self.spinner and self.halo:
                self.halo._interval = 500
        elif new_state == "transcribing":
            if self.on_transcription_start:
                self.on_transcription_start()
            self._set_spinner("transcribing")
            if self.spinner and self.halo:
                self.halo._interval = 50
        elif new_state == "recording":
            self._set_spinner("recording")
            if self.spinner and self.halo:
                self.halo._interval = 100
        elif new_state == "inactive":
            if self.spinner and self.halo:
                self.halo.stop()
            self.halo = None
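
    # Summary of the states handled above: "listening", "wakeword",
    # "transcribing", "recording" and "inactive", with spinner refresh
    # intervals of 250, 500, 50 and 100 ms respectively; "inactive" stops
    # and discards the spinner.
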
    def _set_spinner(self, text):
        """
        Update the spinner's text or create a new
        spinner with the provided text.

        Args:
            text (str): The text to be displayed alongside the spinner.
        """
        if self.spinner:
            # If the Halo spinner doesn't exist, create and start it
            if self.halo is None:
                self.halo = halo.Halo(text=text)
                self.halo.start()
            # If the Halo spinner already exists, just update the text
            else:
                self.halo.text = text

    def _preprocess_output(self, text, preview=False):
        """
        Preprocesses the output text by removing any leading or trailing
        whitespace, converting all whitespace sequences to a single space
        character, and capitalizing the first character of the text (if
        the corresponding option is enabled).

        Args:
            text (str): The text to be preprocessed.
            preview (bool): If True, skip the final-punctuation step.

        Returns:
            str: The preprocessed text.
        """
        text = re.sub(r'\s+', ' ', text.strip())

        if self.ensure_sentence_starting_uppercase:
            if text:
                text = text[0].upper() + text[1:]

        # Ensure the text ends with a proper punctuation
        # if it ends with an alphanumeric character
        if not preview:
            if self.ensure_sentence_ends_with_period:
                if text and text[-1].isalnum():
                    text += '.'

        return text
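
    # Illustrative behavior (assuming both ensure_sentence_starting_uppercase
    # and ensure_sentence_ends_with_period are enabled):
    #
    #   _preprocess_output("  hello   world")                -> "Hello world."
    #   _preprocess_output("  hello   world", preview=True)  -> "Hello world"
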
    def _find_tail_match_in_text(self, text1, text2, length_of_match=10):
        """
        Find the position where the last 'n' characters of text1
        match with a substring in text2.

        This method takes two texts, extracts the last 'n' characters from
        text1 (where 'n' is determined by the variable 'length_of_match'), and
        searches for an occurrence of this substring in text2, starting from
        the end of text2 and moving towards the beginning.

        Args:
            text1 (str): The text containing the substring that we want to
              find in text2.
            text2 (str): The text in which we want to find the matching
              substring.
            length_of_match (int): The length of the matching string that
              we are looking for.

        Returns:
            int: The 0-based index in text2 immediately after the end of the
              matching substring (i.e. where the freshly transcribed tail
              begins). If no match is found or either of the texts is too
              short, returns -1.
        """
        # Check if either of the texts is too short
        if len(text1) < length_of_match or len(text2) < length_of_match:
            return -1

        # The end portion of the first text that we want to compare
        target_substring = text1[-length_of_match:]

        # Loop through text2 from right to left
        for i in range(len(text2) - length_of_match + 1):
            # Extract the substring from text2
            # to compare with the target_substring
            current_substring = text2[len(text2) - i - length_of_match:
                                      len(text2) - i]

            # Compare the current_substring with the target_substring
            if current_substring == target_substring:
                # Return the position just after the end of the match
                return len(text2) - i

        return -1
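
    # Worked example (illustrative): with the default length_of_match of 10,
    #
    #   _find_tail_match_in_text("the quick brown",
    #                            "say the quick brown fox")  ->  19
    #
    # because text1[-10:] == "uick brown" ends at index 19 of text2, so
    # text2[19:] == " fox" is the freshly transcribed tail to append.
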
    def _on_realtime_transcription_stabilized(self, text):
        """
        Callback method invoked when the real-time transcription stabilizes.

        This method is called internally when the transcription text is
        considered "stable," meaning it's less likely to change significantly
        with additional audio input. It notifies any registered external
        listener about the stabilized text if recording is still ongoing.
        This is particularly useful for applications that need to display
        live transcription results to users and want to highlight parts of
        the transcription that are less likely to change.

        Args:
            text (str): The stabilized transcription text.
        """
        if self.on_realtime_transcription_stabilized:
            if self.is_recording:
                self.on_realtime_transcription_stabilized(text)

    def _on_realtime_transcription_update(self, text):
        """
        Callback method invoked when there's an update in the real-time
        transcription.

        This method is called internally whenever there's a change in the
        transcription text, notifying any registered external listener about
        the update if recording is still ongoing. This provides a mechanism
        for applications to receive and possibly display live transcription
        updates, which could be partial and still subject to change.

        Args:
            text (str): The updated transcription text.
        """
        if self.on_realtime_transcription_update:
            if self.is_recording:
                self.on_realtime_transcription_update(text)

    def __enter__(self):
        """
        Method to set up the context manager protocol.

        This enables the instance to be used in a `with` statement, ensuring
        proper resource management. When the `with` block is entered, this
        method is automatically called.

        Returns:
            self: The current instance of the class.
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Method to define behavior when the context manager protocol exits.

        This is called when exiting the `with` block and ensures that any
        necessary cleanup or resource release processes are executed, such as
        shutting down the system properly.

        Args:
            exc_type (Exception or None): The type of the exception that
                caused the context to be exited, if any.
            exc_value (Exception or None): The exception instance that caused
                the context to be exited, if any.
            traceback (Traceback or None): The traceback corresponding to the
                exception, if any.
        """
        self.shutdown()
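

# Usage sketch (illustrative, not part of the original source). The context
# manager protocol above guarantees shutdown() on exit; assuming the
# enclosing class is the recorder defined in this module and that it exposes
# a text() method, it could be used like:
#
#   with AudioToTextRecorder() as recorder:
#       print(recorder.text())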