  1. """
  2. The AudioToTextRecorder class in the provided code facilitates
  3. fast speech-to-text transcription.
  4. The class employs the faster_whisper library to transcribe the recorded audio
  5. into text using machine learning models, which can be run either on a GPU or
  6. CPU. Voice activity detection (VAD) is built in, meaning the software can
  7. automatically start or stop recording based on the presence or absence of
  8. speech. It integrates wake word detection through the pvporcupine library,
  9. allowing the software to initiate recording when a specific word or phrase
  10. is spoken. The system provides real-time feedback and can be further
  11. customized.
  12. Features:
  13. - Voice Activity Detection: Automatically starts/stops recording when speech
  14. is detected or when speech ends.
  15. - Wake Word Detection: Starts recording when a specified wake word (or words)
  16. is detected.
  17. - Event Callbacks: Customizable callbacks for when recording starts
  18. or finishes.
  19. - Fast Transcription: Returns the transcribed text from the audio as fast
  20. as possible.
  21. Author: Kolja Beigel
  22. """
from typing import Iterable, List, Optional, Union
import torch.multiprocessing as mp
import torch
from ctypes import c_bool
from openwakeword.model import Model
from scipy.signal import resample
from scipy import signal
import faster_whisper
import openwakeword
import collections
import numpy as np
import pvporcupine
import traceback
import threading
import webrtcvad
import itertools
import datetime
import platform
import pyaudio
import logging
import struct
import queue
import halo
import time
import copy
import os
import re
import gc
# Set OpenMP runtime duplicate library handling to OK (Use only for development!)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

INIT_MODEL_TRANSCRIPTION = "tiny"
INIT_MODEL_TRANSCRIPTION_REALTIME = "tiny"
INIT_REALTIME_PROCESSING_PAUSE = 0.2
INIT_SILERO_SENSITIVITY = 0.4
INIT_WEBRTC_SENSITIVITY = 3
INIT_POST_SPEECH_SILENCE_DURATION = 0.6
INIT_MIN_LENGTH_OF_RECORDING = 0.5
INIT_MIN_GAP_BETWEEN_RECORDINGS = 0
INIT_WAKE_WORDS_SENSITIVITY = 0.6
INIT_PRE_RECORDING_BUFFER_DURATION = 1.0
INIT_WAKE_WORD_ACTIVATION_DELAY = 0.0
INIT_WAKE_WORD_TIMEOUT = 5.0
INIT_WAKE_WORD_BUFFER_DURATION = 0.1
ALLOWED_LATENCY_LIMIT = 10

TIME_SLEEP = 0.02
SAMPLE_RATE = 16000
BUFFER_SIZE = 512
INT16_MAX_ABS_VALUE = 32768.0

INIT_HANDLE_BUFFER_OVERFLOW = False
if platform.system() != 'Darwin':
    INIT_HANDLE_BUFFER_OVERFLOW = True
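# Sizing note (pure arithmetic on the constants above): one BUFFER_SIZE chunk
# of 512 samples at SAMPLE_RATE 16000 Hz covers 512 / 16000 = 32 ms of audio.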
class AudioToTextRecorder:
    """
    A class responsible for capturing audio from the microphone, detecting
    voice activity, and then transcribing the captured audio using the
    `faster_whisper` model.
    """

    def __init__(self,
                 model: str = INIT_MODEL_TRANSCRIPTION,
                 language: str = "",
                 compute_type: str = "default",
                 input_device_index: int = None,
                 gpu_device_index: Union[int, List[int]] = 0,
                 device: str = "cuda",
                 on_recording_start=None,
                 on_recording_stop=None,
                 on_transcription_start=None,
                 ensure_sentence_starting_uppercase=True,
                 ensure_sentence_ends_with_period=True,
                 use_microphone=True,
                 spinner=True,
                 level=logging.WARNING,

                 # Realtime transcription parameters
                 enable_realtime_transcription=False,
                 use_main_model_for_realtime=False,
                 realtime_model_type=INIT_MODEL_TRANSCRIPTION_REALTIME,
                 realtime_processing_pause=INIT_REALTIME_PROCESSING_PAUSE,
                 on_realtime_transcription_update=None,
                 on_realtime_transcription_stabilized=None,

                 # Voice activation parameters
                 silero_sensitivity: float = INIT_SILERO_SENSITIVITY,
                 silero_use_onnx: bool = False,
                 silero_deactivity_detection: bool = False,
                 webrtc_sensitivity: int = INIT_WEBRTC_SENSITIVITY,
                 post_speech_silence_duration: float = (
                     INIT_POST_SPEECH_SILENCE_DURATION
                 ),
                 min_length_of_recording: float = (
                     INIT_MIN_LENGTH_OF_RECORDING
                 ),
                 min_gap_between_recordings: float = (
                     INIT_MIN_GAP_BETWEEN_RECORDINGS
                 ),
                 pre_recording_buffer_duration: float = (
                     INIT_PRE_RECORDING_BUFFER_DURATION
                 ),
                 on_vad_detect_start=None,
                 on_vad_detect_stop=None,

                 # Wake word parameters
                 wakeword_backend: str = "pvporcupine",
                 openwakeword_model_paths: str = None,
                 openwakeword_inference_framework: str = "onnx",
                 wake_words: str = "",
                 wake_words_sensitivity: float = INIT_WAKE_WORDS_SENSITIVITY,
                 wake_word_activation_delay: float = (
                     INIT_WAKE_WORD_ACTIVATION_DELAY
                 ),
                 wake_word_timeout: float = INIT_WAKE_WORD_TIMEOUT,
                 wake_word_buffer_duration: float = (
                     INIT_WAKE_WORD_BUFFER_DURATION
                 ),
                 on_wakeword_detected=None,
                 on_wakeword_timeout=None,
                 on_wakeword_detection_start=None,
                 on_wakeword_detection_end=None,
                 on_recorded_chunk=None,
                 debug_mode=False,
                 handle_buffer_overflow: bool = INIT_HANDLE_BUFFER_OVERFLOW,
                 beam_size: int = 5,
                 beam_size_realtime: int = 3,
                 buffer_size: int = BUFFER_SIZE,
                 sample_rate: int = SAMPLE_RATE,
                 initial_prompt: Optional[Union[str, Iterable[int]]] = None,
                 suppress_tokens: Optional[List[int]] = [-1],
                 log_transcription_time: bool = False
                 ):
  148. """
  149. Initializes an audio recorder and transcription
  150. and wake word detection.
  151. Args:
  152. - model (str, default="tiny"): Specifies the size of the transcription
  153. model to use or the path to a converted model directory.
  154. Valid options are 'tiny', 'tiny.en', 'base', 'base.en',
  155. 'small', 'small.en', 'medium', 'medium.en', 'large-v1',
  156. 'large-v2'.
  157. If a specific size is provided, the model is downloaded
  158. from the Hugging Face Hub.
  159. - language (str, default=""): Language code for speech-to-text engine.
  160. If not specified, the model will attempt to detect the language
  161. automatically.
  162. - compute_type (str, default="default"): Specifies the type of
  163. computation to be used for transcription.
  164. See https://opennmt.net/CTranslate2/quantization.html.
  165. - input_device_index (int, default=0): The index of the audio input
  166. device to use.
  167. - gpu_device_index (int, default=0): Device ID to use.
  168. The model can also be loaded on multiple GPUs by passing a list of
  169. IDs (e.g. [0, 1, 2, 3]). In that case, multiple transcriptions can
  170. run in parallel when transcribe() is called from multiple Python
  171. threads
  172. - device (str, default="cuda"): Device for model to use. Can either be
  173. "cuda" or "cpu".
  174. - on_recording_start (callable, default=None): Callback function to be
  175. called when recording of audio to be transcripted starts.
  176. - on_recording_stop (callable, default=None): Callback function to be
  177. called when recording of audio to be transcripted stops.
  178. - on_transcription_start (callable, default=None): Callback function
  179. to be called when transcription of audio to text starts.
  180. - ensure_sentence_starting_uppercase (bool, default=True): Ensures
  181. that every sentence detected by the algorithm starts with an
  182. uppercase letter.
  183. - ensure_sentence_ends_with_period (bool, default=True): Ensures that
  184. every sentence that doesn't end with punctuation such as "?", "!"
  185. ends with a period
  186. - use_microphone (bool, default=True): Specifies whether to use the
  187. microphone as the audio input source. If set to False, the
  188. audio input source will be the audio data sent through the
  189. feed_audio() method.
  190. - spinner (bool, default=True): Show spinner animation with current
  191. state.
  192. - level (int, default=logging.WARNING): Logging level.
  193. - enable_realtime_transcription (bool, default=False): Enables or
  194. disables real-time transcription of audio. When set to True, the
  195. audio will be transcribed continuously as it is being recorded.
  196. - use_main_model_for_realtime (str, default=False):
  197. If True, use the main transcription model for both regular and
  198. real-time transcription. If False, use a separate model specified
  199. by realtime_model_type for real-time transcription.
  200. Using a single model can save memory and potentially improve
  201. performance, but may not be optimized for real-time processing.
  202. Using separate models allows for a smaller, faster model for
  203. real-time transcription while keeping a more accurate model for
  204. final transcription.
  205. - realtime_model_type (str, default="tiny"): Specifies the machine
  206. learning model to be used for real-time transcription. Valid
  207. options include 'tiny', 'tiny.en', 'base', 'base.en', 'small',
  208. 'small.en', 'medium', 'medium.en', 'large-v1', 'large-v2'.
  209. - realtime_processing_pause (float, default=0.1): Specifies the time
  210. interval in seconds after a chunk of audio gets transcribed. Lower
  211. values will result in more "real-time" (frequent) transcription
  212. updates but may increase computational load.
  213. - on_realtime_transcription_update = A callback function that is
  214. triggered whenever there's an update in the real-time
  215. transcription. The function is called with the newly transcribed
  216. text as its argument.
  217. - on_realtime_transcription_stabilized = A callback function that is
  218. triggered when the transcribed text stabilizes in quality. The
  219. stabilized text is generally more accurate but may arrive with a
  220. slight delay compared to the regular real-time updates.
  221. - silero_sensitivity (float, default=SILERO_SENSITIVITY): Sensitivity
  222. for the Silero Voice Activity Detection model ranging from 0
  223. (least sensitive) to 1 (most sensitive). Default is 0.5.
  224. - silero_use_onnx (bool, default=False): Enables usage of the
  225. pre-trained model from Silero in the ONNX (Open Neural Network
  226. Exchange) format instead of the PyTorch format. This is
  227. recommended for faster performance.
  228. - silero_deactivity_detection (bool, default=False): Enables the Silero
  229. model for end-of-speech detection. More robust against background
  230. noise. Utilizes additional GPU resources but improves accuracy in
  231. noisy environments. When False, uses the default WebRTC VAD,
  232. which is more sensitive but may continue recording longer due
  233. to background sounds.
  234. - webrtc_sensitivity (int, default=WEBRTC_SENSITIVITY): Sensitivity
  235. for the WebRTC Voice Activity Detection engine ranging from 0
  236. (least aggressive / most sensitive) to 3 (most aggressive,
  237. least sensitive). Default is 3.
  238. - post_speech_silence_duration (float, default=0.2): Duration in
  239. seconds of silence that must follow speech before the recording
  240. is considered to be completed. This ensures that any brief
  241. pauses during speech don't prematurely end the recording.
  242. - min_gap_between_recordings (float, default=1.0): Specifies the
  243. minimum time interval in seconds that should exist between the
  244. end of one recording session and the beginning of another to
  245. prevent rapid consecutive recordings.
  246. - min_length_of_recording (float, default=1.0): Specifies the minimum
  247. duration in seconds that a recording session should last to ensure
  248. meaningful audio capture, preventing excessively short or
  249. fragmented recordings.
  250. - pre_recording_buffer_duration (float, default=0.2): Duration in
  251. seconds for the audio buffer to maintain pre-roll audio
  252. (compensates speech activity detection latency)
  253. - on_vad_detect_start (callable, default=None): Callback function to
  254. be called when the system listens for voice activity.
  255. - on_vad_detect_stop (callable, default=None): Callback function to be
  256. called when the system stops listening for voice activity.
  257. - wakeword_backend (str, default="pvporcupine"): Specifies the backend
  258. library to use for wake word detection. Supported options include
  259. 'pvporcupine' for using the Porcupine wake word engine or 'oww' for
  260. using the OpenWakeWord engine.
  261. - openwakeword_model_paths (str, default=None): Comma-separated paths
  262. to model files for the openwakeword library. These paths point to
  263. custom models that can be used for wake word detection when the
  264. openwakeword library is selected as the wakeword_backend.
  265. - openwakeword_inference_framework (str, default="onnx"): Specifies
  266. the inference framework to use with the openwakeword library.
  267. Can be either 'onnx' for Open Neural Network Exchange format
  268. or 'tflite' for TensorFlow Lite.
  269. - wake_words (str, default=""): Comma-separated string of wake words to
  270. initiate recording when using the 'pvporcupine' wakeword backend.
  271. Supported wake words include: 'alexa', 'americano', 'blueberry',
  272. 'bumblebee', 'computer', 'grapefruits', 'grasshopper', 'hey google',
  273. 'hey siri', 'jarvis', 'ok google', 'picovoice', 'porcupine',
  274. 'terminator'. For the 'openwakeword' backend, wake words are
  275. automatically extracted from the provided model files, so specifying
  276. them here is not necessary.
  277. - wake_words_sensitivity (float, default=0.5): Sensitivity for wake
  278. word detection, ranging from 0 (least sensitive) to 1 (most
  279. sensitive). Default is 0.5.
  280. - wake_word_activation_delay (float, default=0): Duration in seconds
  281. after the start of monitoring before the system switches to wake
  282. word activation if no voice is initially detected. If set to
  283. zero, the system uses wake word activation immediately.
  284. - wake_word_timeout (float, default=5): Duration in seconds after a
  285. wake word is recognized. If no subsequent voice activity is
  286. detected within this window, the system transitions back to an
  287. inactive state, awaiting the next wake word or voice activation.
  288. - wake_word_buffer_duration (float, default=0.1): Duration in seconds
  289. to buffer audio data during wake word detection. This helps in
  290. cutting out the wake word from the recording buffer so it does not
  291. falsely get detected along with the following spoken text, ensuring
  292. cleaner and more accurate transcription start triggers.
  293. Increase this if parts of the wake word get detected as text.
  294. - on_wakeword_detected (callable, default=None): Callback function to
  295. be called when a wake word is detected.
  296. - on_wakeword_timeout (callable, default=None): Callback function to
  297. be called when the system goes back to an inactive state after when
  298. no speech was detected after wake word activation
  299. - on_wakeword_detection_start (callable, default=None): Callback
  300. function to be called when the system starts to listen for wake
  301. words
  302. - on_wakeword_detection_end (callable, default=None): Callback
  303. function to be called when the system stops to listen for
  304. wake words (e.g. because of timeout or wake word detected)
  305. - on_recorded_chunk (callable, default=None): Callback function to be
  306. called when a chunk of audio is recorded. The function is called
  307. with the recorded audio chunk as its argument.
  308. - debug_mode (bool, default=False): If set to True, the system will
  309. print additional debug information to the console.
  310. - handle_buffer_overflow (bool, default=True): If set to True, the system
  311. will log a warning when an input overflow occurs during recording and
  312. remove the data from the buffer.
  313. - beam_size (int, default=5): The beam size to use for beam search
  314. decoding.
  315. - beam_size_realtime (int, default=3): The beam size to use for beam
  316. search decoding in the real-time transcription model.
  317. - buffer_size (int, default=512): The buffer size to use for audio
  318. recording. Changing this may break functionality.
  319. - sample_rate (int, default=16000): The sample rate to use for audio
  320. recording. Changing this will very probably functionality (as the
  321. WebRTC VAD model is very sensitive towards the sample rate).
  322. - initial_prompt (str or iterable of int, default=None): Initial
  323. prompt to be fed to the transcription models.
  324. - suppress_tokens (list of int, default=[-1]): Tokens to be suppressed
  325. from the transcription output.
  326. Raises:
  327. Exception: Errors related to initializing transcription
  328. model, wake word detection, or audio recording.
  329. """
        self.language = language
        self.compute_type = compute_type
        self.input_device_index = input_device_index
        self.gpu_device_index = gpu_device_index
        self.device = device
        self.wake_words = wake_words
        self.wake_word_activation_delay = wake_word_activation_delay
        self.wake_word_timeout = wake_word_timeout
        self.wake_word_buffer_duration = wake_word_buffer_duration
        self.ensure_sentence_starting_uppercase = (
            ensure_sentence_starting_uppercase
        )
        self.ensure_sentence_ends_with_period = (
            ensure_sentence_ends_with_period
        )
        self.use_microphone = mp.Value(c_bool, use_microphone)
        self.min_gap_between_recordings = min_gap_between_recordings
        self.min_length_of_recording = min_length_of_recording
        self.pre_recording_buffer_duration = pre_recording_buffer_duration
        self.post_speech_silence_duration = post_speech_silence_duration
        self.on_recording_start = on_recording_start
        self.on_recording_stop = on_recording_stop
        self.on_wakeword_detected = on_wakeword_detected
        self.on_wakeword_timeout = on_wakeword_timeout
        self.on_vad_detect_start = on_vad_detect_start
        self.on_vad_detect_stop = on_vad_detect_stop
        self.on_wakeword_detection_start = on_wakeword_detection_start
        self.on_wakeword_detection_end = on_wakeword_detection_end
        self.on_recorded_chunk = on_recorded_chunk
        self.on_transcription_start = on_transcription_start
        self.enable_realtime_transcription = enable_realtime_transcription
        self.use_main_model_for_realtime = use_main_model_for_realtime
        self.main_model_type = model
        self.realtime_model_type = realtime_model_type
        self.realtime_processing_pause = realtime_processing_pause
        self.on_realtime_transcription_update = (
            on_realtime_transcription_update
        )
        self.on_realtime_transcription_stabilized = (
            on_realtime_transcription_stabilized
        )
        self.debug_mode = debug_mode
        self.handle_buffer_overflow = handle_buffer_overflow
        self.beam_size = beam_size
        self.beam_size_realtime = beam_size_realtime
        self.allowed_latency_limit = ALLOWED_LATENCY_LIMIT

        self.level = level
        self.audio_queue = mp.Queue()
        self.buffer_size = buffer_size
        self.sample_rate = sample_rate
        self.recording_start_time = 0
        self.recording_stop_time = 0
        self.wake_word_detect_time = 0
        self.silero_check_time = 0
        self.silero_working = False
        self.speech_end_silence_start = 0
        self.silero_sensitivity = silero_sensitivity
        self.silero_deactivity_detection = silero_deactivity_detection
        self.listen_start = 0
        self.spinner = spinner
        self.halo = None
        self.state = "inactive"
        self.wakeword_detected = False
        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.is_webrtc_speech_active = False
        self.is_silero_speech_active = False
        self.recording_thread = None
        self.realtime_thread = None
        self.audio_interface = None
        self.audio = None
        self.stream = None
        self.start_recording_event = threading.Event()
        self.stop_recording_event = threading.Event()
        self.last_transcription_bytes = None
        self.initial_prompt = initial_prompt
        self.suppress_tokens = suppress_tokens
        self.use_wake_words = wake_words or wakeword_backend in {
            'oww', 'openwakeword', 'openwakewords'
        }
        self.detected_language = None
        self.detected_language_probability = 0
        self.detected_realtime_language = None
        self.detected_realtime_language_probability = 0
        self.transcription_lock = threading.Lock()
        self.transcribe_count = 0
        self.log_transcription_time = log_transcription_time

        # Initialize the logging configuration with the specified level
        log_format = 'RealTimeSTT: %(name)s - %(levelname)s - %(message)s'
        file_log_format = '%(asctime)s - ' + log_format

        # Get the root logger
        logger = logging.getLogger()
        logger.setLevel(level)  # Set the logger's level
        logger.propagate = False  # Prevent propagation to higher-level loggers

        # Remove any existing handlers
        logger.handlers = []

        # Create a file handler and set its level
        file_handler = logging.FileHandler('realtimesst.log')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(
            file_log_format, datefmt='%Y-%m-%d %H:%M:%S'
        ))

        # Create a console handler and set its level
        console_handler = logging.StreamHandler()
        console_handler.setLevel(level)
        console_handler.setFormatter(logging.Formatter(log_format))

        # Add the handlers to the logger
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)

        self.is_shut_down = False
        self.shutdown_event = mp.Event()

        try:
            # Only set the start method if it hasn't been set already
            if mp.get_start_method(allow_none=True) is None:
                mp.set_start_method("spawn")
        except RuntimeError as e:
            logging.info(f"Start method has already been set. Details: {e}")

        logging.info("Starting RealTimeSTT")

        self.interrupt_stop_event = mp.Event()
        self.was_interrupted = mp.Event()
        self.main_transcription_ready_event = mp.Event()
        self.parent_transcription_pipe, child_transcription_pipe = mp.Pipe()
        self.parent_stdout_pipe, child_stdout_pipe = mp.Pipe()

        # Set device for model
        self.device = (
            "cuda"
            if self.device == "cuda" and torch.cuda.is_available()
            else "cpu"
        )

        self.transcript_process = self._start_thread(
            target=AudioToTextRecorder._transcription_worker,
            args=(
                child_transcription_pipe,
                child_stdout_pipe,
                model,
                self.compute_type,
                self.gpu_device_index,
                self.device,
                self.main_transcription_ready_event,
                self.shutdown_event,
                self.interrupt_stop_event,
                self.beam_size,
                self.initial_prompt,
                self.suppress_tokens
            )
        )

        # Start audio data reading process
        if self.use_microphone.value:
            logging.info("Initializing audio recording"
                         " (creating pyAudio input stream,"
                         f" sample rate: {self.sample_rate}"
                         f" buffer size: {self.buffer_size}"
                         )
            self.reader_process = self._start_thread(
                target=AudioToTextRecorder._audio_data_worker,
                args=(
                    self.audio_queue,
                    self.sample_rate,
                    self.buffer_size,
                    self.input_device_index,
                    self.shutdown_event,
                    self.interrupt_stop_event,
                    self.use_microphone
                )
            )

        # Initialize the realtime transcription model
        if (self.enable_realtime_transcription
                and not self.use_main_model_for_realtime):
            try:
                logging.info("Initializing faster_whisper realtime "
                             f"transcription model {self.realtime_model_type}"
                             )
                self.realtime_model_type = faster_whisper.WhisperModel(
                    model_size_or_path=self.realtime_model_type,
                    device=self.device,
                    compute_type=self.compute_type,
                    device_index=self.gpu_device_index
                )

            except Exception as e:
                logging.exception("Error initializing faster_whisper "
                                  f"realtime transcription model: {e}"
                                  )
                raise

            logging.debug("Faster_whisper realtime speech to text "
                          "transcription model initialized successfully")

        # Setup wake word detection
        if wake_words or wakeword_backend in {
            'oww', 'openwakeword', 'openwakewords'
        }:
            self.wakeword_backend = wakeword_backend

            self.wake_words_list = [
                word.strip() for word in wake_words.lower().split(',')
            ]
            self.wake_words_sensitivity = wake_words_sensitivity
            self.wake_words_sensitivities = [
                float(wake_words_sensitivity)
                for _ in range(len(self.wake_words_list))
            ]

            if self.wakeword_backend in {'pvp', 'pvporcupine'}:
                try:
                    self.porcupine = pvporcupine.create(
                        keywords=self.wake_words_list,
                        sensitivities=self.wake_words_sensitivities
                    )
                    self.buffer_size = self.porcupine.frame_length
                    self.sample_rate = self.porcupine.sample_rate

                except Exception as e:
                    logging.exception(
                        "Error initializing porcupine "
                        f"wake word detection engine: {e}"
                    )
                    raise

                logging.debug(
                    "Porcupine wake word detection engine initialized "
                    "successfully"
                )

            elif self.wakeword_backend in {
                'oww', 'openwakeword', 'openwakewords'
            }:
                openwakeword.utils.download_models()

                try:
                    if openwakeword_model_paths:
                        model_paths = openwakeword_model_paths.split(',')
                        self.owwModel = Model(
                            wakeword_models=model_paths,
                            inference_framework=(
                                openwakeword_inference_framework
                            )
                        )
                        logging.info(
                            "Successfully loaded wakeword model(s): "
                            f"{openwakeword_model_paths}"
                        )
                    else:
                        self.owwModel = Model(
                            inference_framework=(
                                openwakeword_inference_framework
                            )
                        )

                    self.oww_n_models = len(self.owwModel.models.keys())
                    if not self.oww_n_models:
                        logging.error(
                            "No wake word models loaded."
                        )

                    for model_key in self.owwModel.models.keys():
                        logging.info(
                            "Successfully loaded openwakeword model: "
                            f"{model_key}"
                        )

                except Exception as e:
                    logging.exception(
                        "Error initializing openwakeword "
                        f"wake word detection engine: {e}"
                    )
                    raise

                logging.debug(
                    "Open wake word detection engine initialized successfully"
                )

            else:
                logging.exception(
                    f"Wakeword engine {self.wakeword_backend} "
                    "unknown/unsupported. Please specify one of: "
                    "pvporcupine, openwakeword."
                )

        # Setup voice activity detection model WebRTC
        try:
            logging.info("Initializing WebRTC voice with "
                         f"Sensitivity {webrtc_sensitivity}"
                         )
            self.webrtc_vad_model = webrtcvad.Vad()
            self.webrtc_vad_model.set_mode(webrtc_sensitivity)

        except Exception as e:
            logging.exception("Error initializing WebRTC voice "
                              f"activity detection engine: {e}"
                              )
            raise

        logging.debug("WebRTC VAD voice activity detection "
                      "engine initialized successfully"
                      )

        # Setup voice activity detection model Silero VAD
        try:
            self.silero_vad_model, _ = torch.hub.load(
                repo_or_dir="snakers4/silero-vad",
                model="silero_vad",
                verbose=False,
                onnx=silero_use_onnx
            )

        except Exception as e:
            logging.exception(f"Error initializing Silero VAD "
                              f"voice activity detection engine: {e}"
                              )
            raise

        logging.debug("Silero VAD voice activity detection "
                      "engine initialized successfully"
                      )

        self.audio_buffer = collections.deque(
            maxlen=int((self.sample_rate // self.buffer_size) *
                       self.pre_recording_buffer_duration)
        )
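        # With the defaults this keeps (16000 // 512) * 1.0 = 31 chunks of
        # audio, i.e. roughly one second of pre-roll before recording starts.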
        self.frames = []

        # Recording control flags
        self.is_recording = False
        self.is_running = True
        self.start_recording_on_voice_activity = False
        self.stop_recording_on_voice_deactivity = False

        # Start the recording worker thread
        self.recording_thread = threading.Thread(
            target=self._recording_worker
        )
        self.recording_thread.daemon = True
        self.recording_thread.start()

        # Start the realtime transcription worker thread
        self.realtime_thread = threading.Thread(target=self._realtime_worker)
        self.realtime_thread.daemon = True
        self.realtime_thread.start()

        # Wait for transcription models to start
        logging.debug('Waiting for main transcription model to start')
        self.main_transcription_ready_event.wait()
        logging.debug('Main transcription model ready')

        self.stdout_thread = threading.Thread(target=self._read_stdout)
        self.stdout_thread.daemon = True
        self.stdout_thread.start()

        logging.debug('RealtimeSTT initialization completed successfully')
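
    # Example: wake-word-gated construction (an illustrative sketch built
    # only from the parameters documented above, not a prescribed setup):
    #
    #     recorder = AudioToTextRecorder(
    #         wakeword_backend="pvporcupine",
    #         wake_words="jarvis",
    #         on_wakeword_detected=lambda: print("wake word detected"),
    #     )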
    def _start_thread(self, target=None, args=()):
        """
        Implement a consistent threading model across the library.

        This method is used to start any thread in this library. It uses the
        standard threading.Thread on Linux and, for all other platforms, the
        pytorch multiprocessing 'Process'.

        Args:
            target (callable object): is the callable object to be invoked by
              the run() method. Defaults to None, meaning nothing is called.
            args (tuple): is a list or tuple of arguments for the target
              invocation. Defaults to ().
        """
        if platform.system() == 'Linux':
            thread = threading.Thread(target=target, args=args)
            thread.daemon = True
            thread.start()
            return thread
        else:
            thread = mp.Process(target=target, args=args)
            thread.start()
            return thread
    def _read_stdout(self):
        while not self.shutdown_event.is_set():
            try:
                if self.parent_stdout_pipe.poll(0.1):
                    logging.debug("Receive from stdout pipe")
                    message = self.parent_stdout_pipe.recv()
                    logging.info(message)
            except (BrokenPipeError, EOFError, OSError):
                # The pipe probably has been closed, so we ignore the error
                pass
            except KeyboardInterrupt:  # handle manual interruption (Ctrl+C)
                logging.info("KeyboardInterrupt in read from stdout detected, "
                             "exiting...")
                break
            except Exception as e:
                logging.error(f"Unexpected error in read from stdout: {e}")
                logging.error(traceback.format_exc())  # Log the full traceback
                break
            time.sleep(0.1)
    @staticmethod
    def _transcription_worker(conn,
                              stdout_pipe,
                              model_path,
                              compute_type,
                              gpu_device_index,
                              device,
                              ready_event,
                              shutdown_event,
                              interrupt_stop_event,
                              beam_size,
                              initial_prompt,
                              suppress_tokens
                              ):
        """
        Worker method that handles the continuous
        process of transcribing audio data.

        This method runs in a separate process and is responsible for:
        - Initializing the `faster_whisper` model used for transcription.
        - Receiving audio data sent through a pipe and using the model
          to transcribe it.
        - Sending transcription results back through the pipe.
        - Continuously checking for a shutdown event to gracefully
          terminate the transcription process.

        Args:
            conn (multiprocessing.Connection): The connection endpoint used
              for receiving audio data and sending transcription results.
            stdout_pipe (multiprocessing.Connection): The connection endpoint
              used to forward print output from this worker process.
            model_path (str): The path to the pre-trained faster_whisper model
              for transcription.
            compute_type (str): Specifies the type of computation to be used
              for transcription.
            gpu_device_index (int): Device ID to use.
            device (str): Device for model to use.
            ready_event (threading.Event): An event that is set when the
              transcription model is successfully initialized and ready.
            shutdown_event (threading.Event): An event that, when set,
              signals this worker method to terminate.
            interrupt_stop_event (threading.Event): An event that, when set,
              signals this worker method to stop processing audio data.
            beam_size (int): The beam size to use for beam search decoding.
            initial_prompt (str or iterable of int): Initial prompt to be fed
              to the transcription model.
            suppress_tokens (list of int): Tokens to be suppressed from the
              transcription output.

        Raises:
            Exception: If there is an error while initializing the
            transcription model.
        """
        def custom_print(*args, **kwargs):
            message = ' '.join(map(str, args))
            try:
                stdout_pipe.send(message)
            except (BrokenPipeError, EOFError, OSError):
                # The pipe probably has been closed, so we ignore the error
                pass

        # Replace the built-in print function with our custom one
        __builtins__['print'] = custom_print

        logging.info("Initializing faster_whisper "
                     f"main transcription model {model_path}"
                     )

        try:
            model = faster_whisper.WhisperModel(
                model_size_or_path=model_path,
                device=device,
                compute_type=compute_type,
                device_index=gpu_device_index,
            )

        except Exception as e:
            logging.exception("Error initializing main "
                              f"faster_whisper transcription model: {e}"
                              )
            raise

        ready_event.set()

        logging.debug("Faster_whisper main speech to text "
                      "transcription model initialized successfully"
                      )

        try:
            while not shutdown_event.is_set():
                try:
                    if conn.poll(0.01):
                        logging.debug(
                            "Receive from _transcription_worker pipe"
                        )
                        audio, language = conn.recv()
                        try:
                            segments, info = model.transcribe(
                                audio,
                                language=language if language else None,
                                beam_size=beam_size,
                                initial_prompt=initial_prompt,
                                suppress_tokens=suppress_tokens
                            )
                            transcription = " ".join(
                                seg.text for seg in segments
                            )
                            transcription = transcription.strip()
                            logging.debug("Final text detected with main "
                                          f"model: {transcription}")
                            conn.send(('success', (transcription, info)))
                        except Exception as e:
                            logging.error("General error in "
                                          "_transcription_worker in "
                                          f"transcription: {e}")
                            conn.send(('error', str(e)))
                    else:
                        time.sleep(TIME_SLEEP)

                except KeyboardInterrupt:
                    interrupt_stop_event.set()
                    logging.debug("Transcription worker process "
                                  "finished due to KeyboardInterrupt"
                                  )
                    stdout_pipe.close()
                    break
                except Exception as e:
                    logging.error("General error in _transcription_worker "
                                  f"in accessing pipe: {e}")
        finally:
            __builtins__['print'] = print  # Restore the original print
            conn.close()
            stdout_pipe.close()
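
    # Pipe protocol of the worker above, as used by transcribe() further down
    # (for orientation; both endpoints are created in __init__):
    #
    #     parent_transcription_pipe.send((audio_float32, language))
    #     status, result = parent_transcription_pipe.recv()
    #     # status is 'success' with result == (transcription, info),
    #     # or 'error' with result == an error message string.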
    @staticmethod
    def _audio_data_worker(audio_queue,
                           target_sample_rate,
                           buffer_size,
                           input_device_index,
                           shutdown_event,
                           interrupt_stop_event,
                           use_microphone):
        """
        Worker method that handles the audio recording process.

        This method runs in a separate process and is responsible for:
        - Setting up the audio input stream for recording at the highest
          possible sample rate.
        - Continuously reading audio data from the input stream, resampling
          if necessary, preprocessing the data, and placing complete chunks
          in a queue.
        - Handling errors during the recording process.
        - Gracefully terminating the recording process when a shutdown event
          is set.

        Args:
            audio_queue (queue.Queue): A queue where recorded audio data is
              placed.
            target_sample_rate (int): The desired sample rate for the output
              audio (for Silero VAD).
            buffer_size (int): The number of samples expected by the Silero
              VAD model.
            input_device_index (int): The index of the audio input device.
            shutdown_event (threading.Event): An event that, when set,
              signals this worker method to terminate.
            interrupt_stop_event (threading.Event): An event to signal
              keyboard interrupt.
            use_microphone (multiprocessing.Value): A shared value indicating
              whether to use the microphone.

        Raises:
            Exception: If there is an error while initializing the audio
              recording.
        """
        import pyaudio
        import numpy as np
        from scipy import signal

        def get_highest_sample_rate(audio_interface, device_index):
            """Get the highest supported sample rate for the device."""
            try:
                device_info = audio_interface.get_device_info_by_index(
                    device_index
                )
                max_rate = int(device_info['defaultSampleRate'])

                if 'supportedSampleRates' in device_info:
                    supported_rates = [
                        int(rate)
                        for rate in device_info['supportedSampleRates']
                    ]
                    if supported_rates:
                        max_rate = max(supported_rates)

                return max_rate
            except Exception as e:
                logging.warning(f"Failed to get highest sample rate: {e}")
                return 48000  # Fallback to a common high sample rate

        def initialize_audio_stream(audio_interface, device_index,
                                    sample_rate, chunk_size):
            """Initialize the audio stream with error handling."""
            try:
                stream = audio_interface.open(
                    format=pyaudio.paInt16,
                    channels=1,
                    rate=sample_rate,
                    input=True,
                    frames_per_buffer=chunk_size,
                    input_device_index=device_index,
                )
                return stream
            except Exception as e:
                logging.error(f"Error initializing audio stream: {e}")
                raise

        def preprocess_audio(chunk, original_sample_rate, target_sample_rate):
            """Preprocess audio chunk similar to feed_audio method."""
            if isinstance(chunk, np.ndarray):
                # Handle stereo to mono conversion if necessary
                if chunk.ndim == 2:
                    chunk = np.mean(chunk, axis=1)

                # Resample to target_sample_rate if necessary
                if original_sample_rate != target_sample_rate:
                    num_samples = int(
                        len(chunk) * target_sample_rate / original_sample_rate
                    )
                    chunk = signal.resample(chunk, num_samples)

                # Ensure data type is int16
                chunk = chunk.astype(np.int16)
            else:
                # If chunk is bytes, convert to numpy array
                chunk = np.frombuffer(chunk, dtype=np.int16)

                # Resample if necessary
                if original_sample_rate != target_sample_rate:
                    num_samples = int(
                        len(chunk) * target_sample_rate / original_sample_rate
                    )
                    chunk = signal.resample(chunk, num_samples)
                    chunk = chunk.astype(np.int16)

            return chunk.tobytes()
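        # Resampling arithmetic used above (illustrative): a 1024-sample
        # chunk captured at 48000 Hz becomes int(1024 * 16000 / 48000) = 341
        # samples at a 16000 Hz target rate before re-encoding as int16 bytes.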
        audio_interface = None
        stream = None
        device_sample_rate = None
        chunk_size = 1024  # Increased chunk size for better performance

        def setup_audio():
            nonlocal audio_interface, stream, device_sample_rate, \
                input_device_index
            try:
                audio_interface = pyaudio.PyAudio()
                if input_device_index is None:
                    try:
                        default_device = (
                            audio_interface.get_default_input_device_info()
                        )
                        input_device_index = default_device['index']
                    except OSError as e:
                        input_device_index = None

                sample_rates_to_try = [16000]  # Try 16000 Hz first
                if input_device_index is not None:
                    highest_rate = get_highest_sample_rate(
                        audio_interface, input_device_index
                    )
                    if highest_rate != 16000:
                        sample_rates_to_try.append(highest_rate)
                else:
                    sample_rates_to_try.append(48000)  # Fallback sample rate

                for rate in sample_rates_to_try:
                    try:
                        device_sample_rate = rate
                        stream = initialize_audio_stream(
                            audio_interface, input_device_index,
                            device_sample_rate, chunk_size
                        )
                        if stream is not None:
                            logging.debug(
                                "Audio recording initialized successfully at "
                                f"{device_sample_rate} Hz, reading "
                                f"{chunk_size} frames at a time"
                            )
                            return True
                    except Exception as e:
                        logging.warning(
                            "Failed to initialize audio stream at "
                            f"{device_sample_rate} Hz: {e}"
                        )
                        continue

                # If we reach here, none of the sample rates worked
                raise Exception(
                    "Failed to initialize audio stream with all sample rates."
                )

            except Exception as e:
                logging.exception(
                    f"Error initializing pyaudio audio recording: {e}"
                )
                if audio_interface:
                    audio_interface.terminate()
                return False

        if not setup_audio():
            raise Exception("Failed to set up audio recording.")

        buffer = bytearray()
        silero_buffer_size = 2 * buffer_size  # silero complains if too short
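        # (2 bytes per int16 sample, so this corresponds to exactly
        # buffer_size samples of mono 16-bit PCM)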
        try:
            while not shutdown_event.is_set():
                try:
                    data = stream.read(chunk_size, exception_on_overflow=False)

                    if use_microphone.value:
                        processed_data = preprocess_audio(
                            data, device_sample_rate, target_sample_rate
                        )
                        buffer += processed_data

                        # Check if the buffer has reached or exceeded
                        # the silero_buffer_size
                        while len(buffer) >= silero_buffer_size:
                            # Extract silero_buffer_size amount of data
                            # from the buffer
                            to_process = buffer[:silero_buffer_size]
                            buffer = buffer[silero_buffer_size:]

                            # Feed the extracted data to the audio_queue
                            audio_queue.put(to_process)

                except OSError as e:
                    if e.errno == pyaudio.paInputOverflowed:
                        logging.warning("Input overflowed. Frame dropped.")
                    else:
                        logging.error(f"Error during recording: {e}")

                    # Attempt to reinitialize the stream
                    logging.info(
                        "Attempting to reinitialize the audio stream..."
                    )
                    if stream:
                        stream.stop_stream()
                        stream.close()
                    if audio_interface:
                        audio_interface.terminate()

                    # Wait a bit before trying to reinitialize
                    time.sleep(1)

                    if not setup_audio():
                        logging.error("Failed to reinitialize audio stream. "
                                      "Exiting.")
                        break
                    else:
                        logging.info(
                            "Audio stream reinitialized successfully."
                        )
                    continue

                except Exception as e:
                    logging.error(f"Error during recording: {e}")
                    tb_str = traceback.format_exc()
                    logging.error(f"Traceback: {tb_str}")

                    # Attempt to reinitialize the stream
                    logging.info(
                        "Attempting to reinitialize the audio stream..."
                    )
                    if stream:
                        stream.stop_stream()
                        stream.close()
                    if audio_interface:
                        audio_interface.terminate()

                    # Wait a bit before trying to reinitialize
                    time.sleep(0.5)

                    if not setup_audio():
                        logging.error("Failed to reinitialize audio stream. "
                                      "Exiting.")
                        break
                    else:
                        logging.info(
                            "Audio stream reinitialized successfully."
                        )
                    continue

        except KeyboardInterrupt:
            interrupt_stop_event.set()
            logging.debug("Audio data worker process "
                          "finished due to KeyboardInterrupt")
        finally:
            # After recording stops, feed any remaining audio data
            if buffer:
                audio_queue.put(bytes(buffer))

            if stream:
                stream.stop_stream()
                stream.close()
            if audio_interface:
                audio_interface.terminate()
    def wakeup(self):
        """
        If in wake word mode, wake up as if a wake word was spoken.
        """
        self.listen_start = time.time()

    def abort(self):
        self.start_recording_on_voice_activity = False
        self.stop_recording_on_voice_deactivity = False
        self._set_state("inactive")
        self.interrupt_stop_event.set()
        self.was_interrupted.wait()
        self.was_interrupted.clear()
    def wait_audio(self):
        """
        Waits for the start and completion of the audio recording process.

        This method is responsible for:
        - Waiting for voice activity to begin recording if not yet started.
        - Waiting for voice inactivity to complete the recording.
        - Setting the audio buffer from the recorded frames.
        - Resetting recording-related attributes.

        Side effects:
        - Updates the state of the instance.
        - Modifies the audio attribute to contain the processed audio data.
        """
        logging.info("Setting listen time")
        if self.listen_start == 0:
            self.listen_start = time.time()

        # If not yet started recording, wait for voice activity to initiate.
        if not self.is_recording and not self.frames:
            self._set_state("listening")
            self.start_recording_on_voice_activity = True

            # Wait until recording starts
            logging.debug('Waiting for recording start')
            while not self.interrupt_stop_event.is_set():
                if self.start_recording_event.wait(timeout=0.02):
                    break

        # If recording is ongoing, wait for voice inactivity
        # to finish recording.
        if self.is_recording:
            self.stop_recording_on_voice_deactivity = True

            # Wait until recording stops
            logging.debug('Waiting for recording stop')
            while not self.interrupt_stop_event.is_set():
                if self.stop_recording_event.wait(timeout=0.02):
                    break

        # Convert recorded frames to the appropriate audio format.
        audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)
        self.audio = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE
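        # The int16 PCM samples are scaled into [-1.0, 1.0] float32 values,
        # the waveform format faster_whisper accepts for raw numpy input.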
        self.frames.clear()

        # Reset recording-related timestamps
        self.recording_stop_time = 0
        self.listen_start = 0

        self._set_state("inactive")
    def transcribe(self):
        """
        Transcribes audio captured by this class instance using the
        `faster_whisper` model.

        Automatically starts recording upon voice activity if not manually
        started using `recorder.start()`.
        Automatically stops recording upon voice deactivity if not manually
        stopped with `recorder.stop()`.
        Processes the recorded audio to generate transcription.

        Returns:
            str: The transcription of the recorded audio.

        Raises:
            Exception: If there is an error during the transcription process.
        """
        self._set_state("transcribing")
        audio_copy = copy.deepcopy(self.audio)
        start_time = time.time()  # Start timing
        with self.transcription_lock:
            try:
                if self.transcribe_count == 0:
                    # Send the snapshot taken above so later mutations of
                    # self.audio cannot race with the transcription process.
                    self.parent_transcription_pipe.send((audio_copy, self.language))
                    self.transcribe_count += 1

                while self.transcribe_count > 0:
                    logging.debug("Receiving from parent_transcription_pipe "
                                  "after sending transcription request")
                    status, result = self.parent_transcription_pipe.recv()
                    self.transcribe_count -= 1

                self._set_state("inactive")
                if status == 'success':
                    segments, info = result
                    self.detected_language = info.language if info.language_probability > 0 else None
                    self.detected_language_probability = info.language_probability
                    self.last_transcription_bytes = audio_copy
                    transcription = self._preprocess_output(segments)
                    end_time = time.time()  # End timing
                    transcription_time = end_time - start_time
                    if self.log_transcription_time:
                        logging.info(f"Model {self.main_model_type} completed "
                                     f"transcription in {transcription_time:.2f} seconds")
                    return transcription
                else:
                    logging.error(f"Transcription error: {result}")
                    raise Exception(result)
            except Exception as e:
                logging.error(f"Error during transcription: {str(e)}")
                raise e

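    # Minimal round-trip sketch (hypothetical instance `recorder`):
    # wait_audio() fills self.audio, transcribe() ships it through the pipe
    # to the transcription process and blocks on the (status, result) reply.
    #
    #     recorder.wait_audio()
    #     try:
    #         print(recorder.transcribe())
    #     except Exception as err:
    #         print("transcription failed:", err)
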
    def _process_wakeword(self, data):
        """
        Processes audio data to detect wake words.
        """
        if self.wakeword_backend in {'pvp', 'pvporcupine'}:
            pcm = struct.unpack_from(
                "h" * self.buffer_size,
                data
            )
            porcupine_index = self.porcupine.process(pcm)
            if self.debug_mode:
                logging.info(f"wake words porcupine_index: {porcupine_index}")
            # Return the cached result; processing the same chunk twice
            # would advance Porcupine's internal state a second time.
            return porcupine_index

        elif self.wakeword_backend in {'oww', 'openwakeword', 'openwakewords'}:
            pcm = np.frombuffer(data, dtype=np.int16)
            # predict() fills owwModel.prediction_buffer as a side effect;
            # the return value itself is not needed here.
            prediction = self.owwModel.predict(pcm)
            max_score = -1
            max_index = -1
            wake_words_in_prediction = len(self.owwModel.prediction_buffer.keys())
            if wake_words_in_prediction:
                for idx, mdl in enumerate(self.owwModel.prediction_buffer.keys()):
                    scores = list(self.owwModel.prediction_buffer[mdl])
                    if scores[-1] >= self.wake_words_sensitivity and scores[-1] > max_score:
                        max_score = scores[-1]
                        max_index = idx
                if self.debug_mode:
                    logging.info(f"wake words oww max_index, max_score: {max_index} {max_score}")
                return max_index
            else:
                if self.debug_mode:
                    logging.info("wake words oww_index: -1")
                return -1

        if self.debug_mode:
            logging.info("wake words no match")
        return -1

    def text(self,
             on_transcription_finished=None,
             ):
        """
        Transcribes audio captured by this class instance
        using the `faster_whisper` model.

        - Automatically starts recording upon voice activity if not manually
          started using `recorder.start()`.
        - Automatically stops recording upon voice deactivity if not manually
          stopped with `recorder.stop()`.
        - Processes the recorded audio to generate transcription.

        Args:
            on_transcription_finished (callable, optional): Callback function
              to be executed when transcription is ready.
              If provided, transcription will be performed asynchronously,
              and the callback will receive the transcription as its argument.
              If omitted, the transcription will be performed synchronously,
              and the result will be returned.

        Returns (if no callback is set):
            str: The transcription of the recorded audio
        """
        self.interrupt_stop_event.clear()
        self.was_interrupted.clear()

        self.wait_audio()

        if self.is_shut_down or self.interrupt_stop_event.is_set():
            if self.interrupt_stop_event.is_set():
                self.was_interrupted.set()
            return ""

        if on_transcription_finished:
            # Run the transcription itself inside the worker thread so the
            # callback path is actually asynchronous, as documented above.
            threading.Thread(
                target=lambda: on_transcription_finished(self.transcribe())
            ).start()
        else:
            return self.transcribe()

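    # Usage sketch (hypothetical callback; assumes an instance `recorder`):
    #
    #     # Synchronous: blocks until an utterance is captured and transcribed.
    #     print(recorder.text())
    #
    #     # Asynchronous: returns once the utterance is captured; the callback
    #     # fires on a worker thread when the text is ready.
    #     def on_text(text):
    #         print("heard:", text)
    #     recorder.text(on_transcription_finished=on_text)
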
    def start(self):
        """
        Starts recording audio directly without waiting for voice activity.
        """
        # Ensure there's a minimum interval
        # between stopping and starting recording
        if (time.time() - self.recording_stop_time
                < self.min_gap_between_recordings):
            logging.info("Attempted to start recording "
                         "too soon after stopping.")
            return self

        logging.info("recording started")
        self._set_state("recording")
        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.wakeword_detected = False
        self.wake_word_detect_time = 0
        self.frames = []
        self.is_recording = True
        self.recording_start_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False
        self.stop_recording_event.clear()
        self.start_recording_event.set()

        if self.on_recording_start:
            self.on_recording_start()

        return self

    def stop(self):
        """
        Stops recording audio.
        """
        # Ensure there's a minimum interval
        # between starting and stopping recording
        if (time.time() - self.recording_start_time
                < self.min_length_of_recording):
            logging.info("Attempted to stop recording "
                         "too soon after starting.")
            return self

        logging.info("recording stopped")
        self.is_recording = False
        self.recording_stop_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False
        self.silero_check_time = 0
        self.start_recording_event.clear()
        self.stop_recording_event.set()

        if self.on_recording_stop:
            self.on_recording_stop()

        return self

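    # Manual control sketch (assumes an instance `recorder`): bypass voice
    # activity detection and drive recording explicitly; both methods return
    # self, so they can be chained.
    #
    #     recorder.start()
    #     time.sleep(3)           # capture ~3 seconds of audio
    #     recorder.stop()
    #     print(recorder.text())  # transcribe what was just recorded
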
    def feed_audio(self, chunk, original_sample_rate=16000):
        """
        Feed an audio chunk into the processing pipeline. Chunks are
        accumulated until the buffer size is reached, and then the accumulated
        data is fed into the audio_queue.
        """
        # Check if the buffer attribute exists; if not, initialize it
        if not hasattr(self, 'buffer'):
            self.buffer = bytearray()

        # Check if input is a NumPy array
        if isinstance(chunk, np.ndarray):
            # Handle stereo to mono conversion if necessary
            if chunk.ndim == 2:
                chunk = np.mean(chunk, axis=1)

            # Resample to 16000 Hz if necessary
            if original_sample_rate != 16000:
                num_samples = int(len(chunk) * 16000 / original_sample_rate)
                chunk = resample(chunk, num_samples)

            # Ensure data type is int16
            chunk = chunk.astype(np.int16)

            # Convert the NumPy array to bytes
            chunk = chunk.tobytes()

        # Append the chunk to the buffer
        self.buffer += chunk
        buf_size = 2 * self.buffer_size  # silero complains if too short

        # Check if the buffer has reached or exceeded buf_size
        while len(self.buffer) >= buf_size:
            # Extract a buf_size chunk of data from the buffer
            to_process = self.buffer[:buf_size]
            self.buffer = self.buffer[buf_size:]

            # Feed the extracted data to the audio_queue
            self.audio_queue.put(to_process)

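    # Sketch: feeding audio from an external source (hypothetical 44.1 kHz
    # stereo int16 block). For np.ndarray input, feed_audio handles the
    # stereo downmix, resampling to 16 kHz, and int16 byte conversion.
    #
    #     import numpy as np
    #     block = (np.random.rand(4410, 2) * 32767).astype(np.int16)  # 100 ms stereo
    #     recorder.feed_audio(block, original_sample_rate=44100)
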
    def set_microphone(self, microphone_on=True):
        """
        Set the microphone on or off.
        """
        logging.info(f"Setting microphone to: {microphone_on}")
        self.use_microphone.value = microphone_on

    def shutdown(self):
        """
        Safely shuts down the audio recording by stopping the
        recording worker and closing the audio stream.
        """
        # Force wait_audio() and text() to exit
        self.is_shut_down = True
        self.start_recording_event.set()
        self.stop_recording_event.set()

        self.shutdown_event.set()
        self.is_recording = False
        self.is_running = False

        logging.debug('Finishing recording thread')
        if self.recording_thread:
            self.recording_thread.join()

        logging.debug('Terminating reader process')
        # Give it some time to finish the loop and cleanup.
        if self.use_microphone:
            self.reader_process.join(timeout=10)

            if self.reader_process.is_alive():
                logging.warning("Reader process did not terminate "
                                "in time. Terminating forcefully.")
                self.reader_process.terminate()

        logging.debug('Terminating transcription process')
        self.transcript_process.join(timeout=10)

        if self.transcript_process.is_alive():
            logging.warning("Transcript process did not terminate "
                            "in time. Terminating forcefully.")
            self.transcript_process.terminate()

        self.parent_transcription_pipe.close()

        logging.debug('Finishing realtime thread')
        if self.realtime_thread:
            self.realtime_thread.join()

        if self.enable_realtime_transcription:
            if self.realtime_model_type:
                del self.realtime_model_type
                self.realtime_model_type = None
        gc.collect()

    def _recording_worker(self):
        """
        The main worker method which constantly monitors the audio
        input for voice activity and accordingly starts/stops the recording.
        """
        logging.debug('Starting recording worker')

        try:
            was_recording = False
            delay_was_passed = False
            wakeword_detected_time = None
            wakeword_samples_to_remove = None

            # Continuously monitor audio for voice activity
            while self.is_running:

                try:
                    try:
                        data = self.audio_queue.get(timeout=0.1)
                    except queue.Empty:
                        if not self.is_running:
                            break
                        continue

                    if self.on_recorded_chunk:
                        self.on_recorded_chunk(data)

                    if self.handle_buffer_overflow:
                        # Handle queue overflow
                        if (self.audio_queue.qsize() >
                                self.allowed_latency_limit):
                            logging.warning("Audio queue size exceeds "
                                            "latency limit. Current size: "
                                            f"{self.audio_queue.qsize()}. "
                                            "Discarding old audio chunks.")

                        while (self.audio_queue.qsize() >
                                self.allowed_latency_limit):
                            data = self.audio_queue.get()

                except BrokenPipeError:
                    logging.error("BrokenPipeError _recording_worker")
                    self.is_running = False
                    break

                if not self.is_recording:
                    logging.info(f"not recording, state: {self.state}, "
                                 f"self.recording_stop_time: {self.recording_stop_time}, "
                                 f"self.listen_start: {self.listen_start}")

                    # Handle not-recording state
                    time_since_listen_start = (time.time() - self.listen_start
                                               if self.listen_start else 0)

                    wake_word_activation_delay_passed = (
                        time_since_listen_start >
                        self.wake_word_activation_delay
                    )

                    # Handle wake-word timeout callback
                    if wake_word_activation_delay_passed \
                            and not delay_was_passed:

                        if self.use_wake_words and self.wake_word_activation_delay:
                            if self.on_wakeword_timeout:
                                self.on_wakeword_timeout()
                    delay_was_passed = wake_word_activation_delay_passed

                    # Set state and spinner text
                    if not self.recording_stop_time:
                        if self.use_wake_words \
                                and wake_word_activation_delay_passed \
                                and not self.wakeword_detected:
                            self._set_state("wakeword")
                        else:
                            if self.listen_start:
                                self._set_state("listening")
                            else:
                                self._set_state("inactive")

                    # self.wake_word_detect_time = time.time()
                    if self.use_wake_words and wake_word_activation_delay_passed:
                        try:
                            wakeword_index = self._process_wakeword(data)

                        except struct.error:
                            logging.error("Error unpacking audio data "
                                          "for wake word processing.")
                            continue

                        except Exception as e:
                            logging.error(f"Wake word processing error: {e}")
                            continue

                        # If a wake word is detected
                        if wakeword_index >= 0:
                            wakeword_detected_time = time.time()
                            wakeword_samples_to_remove = int(
                                self.sample_rate * self.wake_word_buffer_duration)
                            self.wakeword_detected = True
                            if self.on_wakeword_detected:
                                self.on_wakeword_detected()

                    # Check for voice activity to
                    # trigger the start of recording
                    if ((not self.use_wake_words
                         or not wake_word_activation_delay_passed)
                            and self.start_recording_on_voice_activity) \
                            or self.wakeword_detected:

                        if self._is_voice_active():
                            logging.info("voice activity detected")

                            self.start()

                            self.start_recording_on_voice_activity = False

                            # Add the buffered audio
                            # to the recording frames
                            self.frames.extend(list(self.audio_buffer))
                            self.audio_buffer.clear()

                            self.silero_vad_model.reset_states()
                        else:
                            data_copy = data[:]
                            self._check_voice_activity(data_copy)

                    self.speech_end_silence_start = 0

                else:
                    # If we are currently recording
                    if wakeword_samples_to_remove and wakeword_samples_to_remove > 0:
                        # Remove samples from the beginning of self.frames
                        samples_removed = 0
                        while wakeword_samples_to_remove > 0 and self.frames:
                            frame = self.frames[0]
                            frame_samples = len(frame) // 2  # Assuming 16-bit audio
                            if wakeword_samples_to_remove >= frame_samples:
                                self.frames.pop(0)
                                samples_removed += frame_samples
                                wakeword_samples_to_remove -= frame_samples
                            else:
                                self.frames[0] = frame[wakeword_samples_to_remove * 2:]
                                samples_removed += wakeword_samples_to_remove
                                wakeword_samples_to_remove = 0

                        wakeword_samples_to_remove = 0

                    # Stop the recording if silence is detected after speech
                    if self.stop_recording_on_voice_deactivity:
                        is_speech = (
                            self._is_silero_speech(data) if self.silero_deactivity_detection
                            else self._is_webrtc_speech(data, True)
                        )

                        if not is_speech:
                            # Voice deactivity was detected, so we start
                            # measuring silence time before stopping recording
                            if self.speech_end_silence_start == 0:
                                self.speech_end_silence_start = time.time()
                                # if (len(self.frames) > 0):
                                #     audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)
                                #     audio = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE
                                #     self.parent_transcription_pipe.send((audio, self.language))
                                #     self.transcribe_count += 1
                        else:
                            self.speech_end_silence_start = 0

                        # Wait for silence to stop recording after speech
                        if self.speech_end_silence_start and time.time() - \
                                self.speech_end_silence_start >= \
                                self.post_speech_silence_duration:
                            logging.info("voice deactivity detected")
                            self.frames.append(data)

                            logging.info("stopping recording")
                            self.stop()
                            logging.info("stopped recording")

                            if not self.use_wake_words:
                                self.listen_start = time.time()
                                self._set_state("listening")
                                self.start_recording_on_voice_activity = True

                if not self.is_recording and was_recording:
                    # Reset after stopping recording to ensure a clean state
                    self.stop_recording_on_voice_deactivity = False

                if time.time() - self.silero_check_time > 0.1:
                    self.silero_check_time = 0

                # Handle wake word timeout (waited too long to initiate
                # speech after wake word detection)
                if self.wake_word_detect_time and time.time() - \
                        self.wake_word_detect_time > self.wake_word_timeout:

                    self.wake_word_detect_time = 0
                    if self.wakeword_detected and self.on_wakeword_timeout:
                        self.on_wakeword_timeout()
                    self.wakeword_detected = False

                was_recording = self.is_recording

                if self.is_recording:
                    self.frames.append(data)

                if not self.is_recording or self.speech_end_silence_start:
                    self.audio_buffer.append(data)

        except Exception as e:
            if not self.interrupt_stop_event.is_set():
                logging.error(f"Unhandled exception in _recording_worker: {e}")
                raise

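    # The stop condition above, in isolation (a minimal sketch with
    # hypothetical names): once no speech is detected, a silence timer
    # starts; recording stops only after post_speech_silence_duration of
    # uninterrupted silence, and any detected speech resets the timer.
    #
    #     if not is_speech:
    #         if silence_start == 0:
    #             silence_start = time.time()
    #     else:
    #         silence_start = 0
    #     if silence_start and time.time() - silence_start >= post_speech_silence_duration:
    #         stop_recording()
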
    def _realtime_worker(self):
        """
        Performs real-time transcription if the feature is enabled.

        The method is responsible for transcribing recorded audio frames
        in real-time based on the specified resolution interval.
        The transcribed text is stored in `self.realtime_transcription_text`
        and a callback function is invoked with this text if specified.
        """
        try:
            logging.debug('Starting realtime worker')

            # Return immediately if real-time transcription is not enabled
            if not self.enable_realtime_transcription:
                return

            # Continue running as long as the main process is active
            while self.is_running:

                # Check if the recording is active
                if self.is_recording:

                    # Sleep for the duration of the transcription resolution
                    time.sleep(self.realtime_processing_pause)

                    # Convert the buffer frames to a NumPy array
                    audio_array = np.frombuffer(
                        b''.join(self.frames),
                        dtype=np.int16
                        )

                    logging.debug(f"Current realtime buffer size: {len(audio_array)}")

                    # Normalize the array to a [-1, 1] range
                    audio_array = audio_array.astype(np.float32) / \
                        INT16_MAX_ABS_VALUE

                    if self.use_main_model_for_realtime:
                        with self.transcription_lock:
                            try:
                                self.parent_transcription_pipe.send((audio_array, self.language))
                                if self.parent_transcription_pipe.poll(timeout=5):  # Wait for 5 seconds
                                    logging.debug("Receive from realtime worker after transcription request to main model")
                                    status, result = self.parent_transcription_pipe.recv()
                                    if status == 'success':
                                        segments, info = result
                                        self.detected_realtime_language = info.language if info.language_probability > 0 else None
                                        self.detected_realtime_language_probability = info.language_probability
                                        # segments arrives as already-assembled
                                        # text from the transcription process
                                        realtime_text = segments
                                        logging.debug(f"Realtime text detected with main model: {realtime_text}")
                                    else:
                                        logging.error(f"Realtime transcription error: {result}")
                                        continue
                                else:
                                    logging.warning("Realtime transcription timed out")
                                    continue
                            except Exception as e:
                                logging.error(f"Error in realtime transcription: {str(e)}")
                                continue
                    else:
                        # Perform transcription and assemble the text
                        segments, info = self.realtime_model_type.transcribe(
                            audio_array,
                            language=self.language if self.language else None,
                            beam_size=self.beam_size_realtime,
                            initial_prompt=self.initial_prompt,
                            suppress_tokens=self.suppress_tokens,
                        )

                        self.detected_realtime_language = info.language if info.language_probability > 0 else None
                        self.detected_realtime_language_probability = info.language_probability
                        realtime_text = " ".join(
                            seg.text for seg in segments
                        )
                        logging.debug(f"Realtime text detected: {realtime_text}")

                    # Double-check the recording state
                    # because it could have changed mid-transcription
                    if self.is_recording and time.time() - \
                            self.recording_start_time > 0.5:

                        logging.debug('Starting realtime transcription')
                        self.realtime_transcription_text = realtime_text
                        self.realtime_transcription_text = \
                            self.realtime_transcription_text.strip()

                        self.text_storage.append(
                            self.realtime_transcription_text
                            )

                        # Take the last two texts in storage, if they exist
                        if len(self.text_storage) >= 2:
                            last_two_texts = self.text_storage[-2:]

                            # Find the longest common prefix
                            # between the two texts
                            prefix = os.path.commonprefix(
                                [last_two_texts[0], last_two_texts[1]]
                                )

                            # This prefix is the text that was transcribed
                            # two times in the same way.
                            # Store it as "safely detected text".
                            if len(prefix) >= \
                                    len(self.realtime_stabilized_safetext):

                                # Only store when longer than the previous
                                # as additional security
                                self.realtime_stabilized_safetext = prefix

                        # Find parts of the stabilized text
                        # in the freshly transcribed text
                        matching_pos = self._find_tail_match_in_text(
                            self.realtime_stabilized_safetext,
                            self.realtime_transcription_text
                            )

                        if matching_pos < 0:
                            if self.realtime_stabilized_safetext:
                                self._on_realtime_transcription_stabilized(
                                    self._preprocess_output(
                                        self.realtime_stabilized_safetext,
                                        True
                                    )
                                )
                            else:
                                self._on_realtime_transcription_stabilized(
                                    self._preprocess_output(
                                        self.realtime_transcription_text,
                                        True
                                    )
                                )
                        else:
                            # We found parts of the stabilized text
                            # in the transcribed text.
                            # We now take the stabilized text and add only
                            # the freshly transcribed part to it.
                            output_text = self.realtime_stabilized_safetext + \
                                self.realtime_transcription_text[matching_pos:]

                            # This yields us the "left" text part as stabilized
                            # AND at the same time delivers fresh detected
                            # parts on the first run without the need for
                            # two transcriptions
                            self._on_realtime_transcription_stabilized(
                                self._preprocess_output(output_text, True)
                                )

                        # Invoke the callback with the transcribed text
                        self._on_realtime_transcription_update(
                            self._preprocess_output(
                                self.realtime_transcription_text,
                                True
                            )
                        )

                # If not recording, sleep briefly before checking again
                else:
                    time.sleep(TIME_SLEEP)

        except Exception as e:
            logging.error(f"Unhandled exception in _realtime_worker: {e}")
            raise

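    # The stabilization scheme above, in isolation (hypothetical strings):
    # text transcribed identically in two successive passes becomes the
    # "safe" prefix, and only the fresh remainder of the newest pass is
    # appended after the safe prefix's matched tail.
    #
    #     import os
    #     passes = ["the quick brown", "the quick brown fox jumps"]
    #     safe = os.path.commonprefix(passes)                     # -> "the quick brown"
    #     pos = self._find_tail_match_in_text(safe, passes[-1])   # -> 15
    #     print(safe + passes[-1][pos:])                          # -> "the quick brown fox jumps"
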
    def _is_silero_speech(self, chunk):
        """
        Returns True if speech is detected in the provided audio data

        Args:
            chunk (bytes): raw bytes of audio data (1024 raw bytes with
            16000 sample rate and 16 bits per sample)
        """
        if self.sample_rate != 16000:
            pcm_data = np.frombuffer(chunk, dtype=np.int16)
            data_16000 = signal.resample_poly(
                pcm_data, 16000, self.sample_rate)
            chunk = data_16000.astype(np.int16).tobytes()

        self.silero_working = True
        audio_chunk = np.frombuffer(chunk, dtype=np.int16)
        audio_chunk = audio_chunk.astype(np.float32) / INT16_MAX_ABS_VALUE
        vad_prob = self.silero_vad_model(
            torch.from_numpy(audio_chunk),
            SAMPLE_RATE).item()
        is_silero_speech_active = vad_prob > (1 - self.silero_sensitivity)
        if is_silero_speech_active:
            self.is_silero_speech_active = True
        self.silero_working = False
        return is_silero_speech_active

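    # Worked threshold example (illustrative numbers): with
    # silero_sensitivity = 0.4 the decision boundary is 1 - 0.4 = 0.6, so a
    # VAD probability of 0.72 counts as speech and 0.55 does not. Raising
    # the sensitivity therefore lowers the probability required to classify
    # a chunk as speech.
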
    def _is_webrtc_speech(self, chunk, all_frames_must_be_true=False):
        """
        Returns True if speech is detected in the provided audio data

        Args:
            chunk (bytes): raw bytes of audio data (1024 raw bytes with
            16000 sample rate and 16 bits per sample)
        """
        if self.sample_rate != 16000:
            pcm_data = np.frombuffer(chunk, dtype=np.int16)
            data_16000 = signal.resample_poly(
                pcm_data, 16000, self.sample_rate)
            chunk = data_16000.astype(np.int16).tobytes()

        # Number of audio samples in one 10 ms frame at 16 kHz
        frame_length = int(16000 * 0.01)
        num_frames = int(len(chunk) / (2 * frame_length))
        speech_frames = 0

        for i in range(num_frames):
            start_byte = i * frame_length * 2
            end_byte = start_byte + frame_length * 2
            frame = chunk[start_byte:end_byte]
            if self.webrtc_vad_model.is_speech(frame, 16000):
                speech_frames += 1
                if not all_frames_must_be_true:
                    if self.debug_mode:
                        logging.info(f"Speech detected in frame {i + 1}"
                                     f" of {num_frames}")
                    return True
        if all_frames_must_be_true:
            if self.debug_mode and speech_frames == num_frames:
                logging.info(f"Speech detected in {speech_frames} of "
                             f"{num_frames} frames")
            elif self.debug_mode:
                logging.info(f"Speech not detected in all {num_frames} frames")
            return speech_frames == num_frames
        else:
            if self.debug_mode:
                logging.info(f"Speech not detected in any of {num_frames} frames")
            return False

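    # Worked frame math (for the default 16 kHz, 16-bit mono chunks): a 10 ms
    # frame holds 160 samples = 320 bytes, so a 1024-byte chunk yields
    # int(1024 / 320) = 3 complete frames; the trailing 64 bytes are ignored
    # by the loop above.
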
    def _check_voice_activity(self, data):
        """
        Initiate a check whether voice is active based on the provided data.

        Args:
            data: The audio data to be checked for voice activity.
        """
        # First, a quick low-cost check for voice activity using WebRTC
        self.is_webrtc_speech_active = self._is_webrtc_speech(data)

        if self.is_webrtc_speech_active:
            if not self.silero_working:
                self.silero_working = True

                # Run the more compute-intensive Silero check
                # in a separate thread
                threading.Thread(
                    target=self._is_silero_speech,
                    args=(data,)).start()

    def clear_audio_queue(self):
        """
        Safely empties the audio queue to ensure no remaining audio
        fragments get processed e.g. after waking up the recorder.
        """
        self.audio_buffer.clear()
        try:
            while True:
                self.audio_queue.get_nowait()
        except Exception:
            # PyTorch's mp.Queue doesn't have a specific Empty exception,
            # so we catch any exception that occurs when the queue is empty
            pass

    def _is_voice_active(self):
        """
        Determine if voice is active.

        Returns:
            bool: True if voice is active, False otherwise.
        """
        return self.is_webrtc_speech_active and self.is_silero_speech_active

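    # Design note: WebRTC acts as the cheap first-pass gate and Silero as the
    # more accurate confirmation, so voice only counts as active once both
    # detectors agree; this trades a little latency for fewer false starts.
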
    def _set_state(self, new_state):
        """
        Update the current state of the recorder and execute
        corresponding state-change callbacks.

        Args:
            new_state (str): The new state to set.
        """
        # Check if the state has actually changed
        if new_state == self.state:
            return

        # Store the current state for later comparison
        old_state = self.state

        # Update to the new state
        self.state = new_state

        # Log the state change
        logging.info(f"State changed from '{old_state}' to '{new_state}'")

        # Execute callbacks based on transitioning FROM a particular state
        if old_state == "listening":
            if self.on_vad_detect_stop:
                self.on_vad_detect_stop()
        elif old_state == "wakeword":
            if self.on_wakeword_detection_end:
                self.on_wakeword_detection_end()

        # Execute callbacks based on transitioning TO a particular state
        if new_state == "listening":
            if self.on_vad_detect_start:
                self.on_vad_detect_start()
            self._set_spinner("speak now")
            if self.spinner and self.halo:
                self.halo._interval = 250
        elif new_state == "wakeword":
            if self.on_wakeword_detection_start:
                self.on_wakeword_detection_start()
            self._set_spinner(f"say {self.wake_words}")
            if self.spinner and self.halo:
                self.halo._interval = 500
        elif new_state == "transcribing":
            if self.on_transcription_start:
                self.on_transcription_start()
            self._set_spinner("transcribing")
            if self.spinner and self.halo:
                self.halo._interval = 50
        elif new_state == "recording":
            self._set_spinner("recording")
            if self.spinner and self.halo:
                self.halo._interval = 100
        elif new_state == "inactive":
            if self.spinner and self.halo:
                self.halo.stop()
                self.halo = None

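    # State-callback sketch (hypothetical wiring; assumes the on_* callback
    # attributes used above are settable on an instance `recorder`):
    #
    #     recorder.on_vad_detect_start = lambda: print(">> speak now")
    #     recorder.on_transcription_start = lambda: print(">> transcribing")
    #     recorder.on_wakeword_detection_start = lambda: print(">> waiting for wake word")
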
    def _set_spinner(self, text):
        """
        Update the spinner's text or create a new
        spinner with the provided text.

        Args:
            text (str): The text to be displayed alongside the spinner.
        """
        if self.spinner:
            # If the Halo spinner doesn't exist, create and start it
            if self.halo is None:
                self.halo = halo.Halo(text=text)
                self.halo.start()
            # If the Halo spinner already exists, just update the text
            else:
                self.halo.text = text

    def _preprocess_output(self, text, preview=False):
        """
        Preprocesses the output text by removing any leading or trailing
        whitespace, converting all whitespace sequences to a single space
        character, and capitalizing the first character of the text.

        Args:
            text (str): The text to be preprocessed.

        Returns:
            str: The preprocessed text.
        """
        text = re.sub(r'\s+', ' ', text.strip())

        if self.ensure_sentence_starting_uppercase:
            if text:
                text = text[0].upper() + text[1:]

        # Ensure the text ends with a proper punctuation
        # if it ends with an alphanumeric character
        if not preview:
            if self.ensure_sentence_ends_with_period:
                if text and text[-1].isalnum():
                    text += '.'

        return text

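    # Before/after sketch (illustrative values, assuming both ensure_* flags
    # are enabled):
    #
    #     _preprocess_output("  hello   world ")                # -> "Hello world."
    #     _preprocess_output("  hello   world ", preview=True)  # -> "Hello world"
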
    def _find_tail_match_in_text(self, text1, text2, length_of_match=10):
        """
        Find the position where the last 'n' characters of text1
        match with a substring in text2.

        This method takes two texts, extracts the last 'n' characters from
        text1 (where 'n' is determined by the variable 'length_of_match'), and
        searches for an occurrence of this substring in text2, starting from
        the end of text2 and moving towards the beginning.

        Parameters:
        - text1 (str): The text containing the substring that we want to find
          in text2.
        - text2 (str): The text in which we want to find the matching
          substring.
        - length_of_match (int): The length of the matching string that we are
          looking for.

        Returns:
            int: The position (0-based index) in text2 immediately after the
            end of the matching substring, i.e. where the fresh, unmatched
            text begins. If no match is found or either of the texts is too
            short, returns -1.
        """
        # Check if either of the texts is too short
        if len(text1) < length_of_match or len(text2) < length_of_match:
            return -1

        # The end portion of the first text that we want to compare
        target_substring = text1[-length_of_match:]

        # Loop through text2 from right to left
        for i in range(len(text2) - length_of_match + 1):
            # Extract the substring from text2
            # to compare with the target_substring
            current_substring = text2[len(text2) - i - length_of_match:
                                      len(text2) - i]

            # Compare the current_substring with the target_substring
            if current_substring == target_substring:
                # Position in text2 just past where the match ends
                return len(text2) - i

        return -1

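    # Worked example (illustrative strings): with the default
    # length_of_match=10, the tail of the stabilized text is located inside
    # the newest transcription, and the returned index marks where the
    # fresh text starts.
    #
    #     safe = "the quick brown"
    #     fresh = "the quick brown fox jumps"
    #     pos = self._find_tail_match_in_text(safe, fresh)  # -> 15
    #     merged = safe + fresh[pos:]                       # -> "the quick brown fox jumps"
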
    def _on_realtime_transcription_stabilized(self, text):
        """
        Callback method invoked when the real-time transcription stabilizes.

        This method is called internally when the transcription text is
        considered "stable", meaning it's less likely to change significantly
        with additional audio input. It notifies any registered external
        listener about the stabilized text if recording is still ongoing.
        This is particularly useful for applications that need to display
        live transcription results to users and want to highlight parts of the
        transcription that are less likely to change.

        Args:
            text (str): The stabilized transcription text.
        """
        if self.on_realtime_transcription_stabilized:
            if self.is_recording:
                self.on_realtime_transcription_stabilized(text)

    def _on_realtime_transcription_update(self, text):
        """
        Callback method invoked when there's an update in the real-time
        transcription.

        This method is called internally whenever there's a change in the
        transcription text, notifying any registered external listener about
        the update if recording is still ongoing. This provides a mechanism
        for applications to receive and possibly display live transcription
        updates, which could be partial and still subject to change.

        Args:
            text (str): The updated transcription text.
        """
        if self.on_realtime_transcription_update:
            if self.is_recording:
                self.on_realtime_transcription_update(text)

    def __enter__(self):
        """
        Method to set up the context manager protocol.

        This enables the instance to be used in a `with` statement, ensuring
        proper resource management. When the `with` block is entered, this
        method is automatically called.

        Returns:
            self: The current instance of the class.
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Method to define behavior when the context manager protocol exits.

        This is called when exiting the `with` block and ensures that any
        necessary cleanup or resource release processes are executed, such as
        shutting down the system properly.

        Args:
            exc_type (Exception or None): The type of the exception that
                caused the context to be exited, if any.
            exc_value (Exception or None): The exception instance that caused
                the context to be exited, if any.
            traceback (Traceback or None): The traceback corresponding to the
                exception, if any.
        """
        self.shutdown()

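    # Context-manager usage sketch (class name assumed; substitute whatever
    # this recorder class is called in your import): the protocol above
    # guarantees shutdown() runs even if transcription raises.
    #
    #     with AudioToTextRecorder() as recorder:
    #         print(recorder.text())
    #     # shutdown() has already been called here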