# audio_recorder.py

  1. """
  2. The AudioToTextRecorder class in the provided code facilitates
  3. fast speech-to-text transcription.
  4. The class employs the faster_whisper library to transcribe the recorded audio
  5. into text using machine learning models, which can be run either on a GPU or
  6. CPU. Voice activity detection (VAD) is built in, meaning the software can
  7. automatically start or stop recording based on the presence or absence of
  8. speech. The system provides real-time feedback and can be further
  9. customized.
  10. Features:
  11. - Voice Activity Detection: Automatically starts/stops recording when speech
  12. is detected or when speech ends.
  13. - Event Callbacks: Customizable callbacks for when recording starts
  14. or finishes.
  15. - Fast Transcription: Returns the transcribed text from the audio as fast
  16. as possible.
  17. Author: Kolja Beigel
  18. """
from typing import Iterable, List, Optional, Union
import torch.multiprocessing as mp
import torch
from ctypes import c_bool
from scipy.signal import resample
from scipy import signal
import signal as system_signal
import faster_whisper
import collections
import numpy as np
import traceback
import threading
import webrtcvad
import itertools
import datetime
import platform
import pyaudio
import logging
import struct
import base64
import queue
import halo
import time
import copy
import os
import re
import gc

# Set OpenMP runtime duplicate library handling to OK (Use only for development!)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

INIT_MODEL_TRANSCRIPTION = "tiny"
INIT_MODEL_TRANSCRIPTION_REALTIME = "tiny"
INIT_REALTIME_PROCESSING_PAUSE = 0.2
INIT_REALTIME_INITIAL_PAUSE = 0.2
INIT_SILERO_SENSITIVITY = 0.4
INIT_WEBRTC_SENSITIVITY = 3
INIT_POST_SPEECH_SILENCE_DURATION = 0.6
INIT_MIN_LENGTH_OF_RECORDING = 0.5
INIT_MIN_GAP_BETWEEN_RECORDINGS = 0
INIT_PRE_RECORDING_BUFFER_DURATION = 1.0
ALLOWED_LATENCY_LIMIT = 100

TIME_SLEEP = 0.02
SAMPLE_RATE = 16000
BUFFER_SIZE = 512
INT16_MAX_ABS_VALUE = 32768.0

INIT_HANDLE_BUFFER_OVERFLOW = False
if platform.system() != 'Darwin':
    INIT_HANDLE_BUFFER_OVERFLOW = True

class TranscriptionWorker:
    def __init__(self, conn, stdout_pipe, model_path, compute_type, gpu_device_index, device,
                 ready_event, shutdown_event, interrupt_stop_event, beam_size, initial_prompt, suppress_tokens):
        self.conn = conn
        self.stdout_pipe = stdout_pipe
        self.model_path = model_path
        self.compute_type = compute_type
        self.gpu_device_index = gpu_device_index
        self.device = device
        self.ready_event = ready_event
        self.shutdown_event = shutdown_event
        self.interrupt_stop_event = interrupt_stop_event
        self.beam_size = beam_size
        self.initial_prompt = initial_prompt
        self.suppress_tokens = suppress_tokens
        self.queue = queue.Queue()

    def custom_print(self, *args, **kwargs):
        message = ' '.join(map(str, args))
        try:
            self.stdout_pipe.send(message)
        except (BrokenPipeError, EOFError, OSError):
            pass

    def poll_connection(self):
        while not self.shutdown_event.is_set():
            if self.conn.poll(0.01):
                try:
                    data = self.conn.recv()
                    self.queue.put(data)
                except Exception as e:
                    logging.error(f"Error receiving data from connection: {e}")
            else:
                time.sleep(TIME_SLEEP)

    def run(self):
        if __name__ == "__main__":
            system_signal.signal(system_signal.SIGINT, system_signal.SIG_IGN)
            __builtins__['print'] = self.custom_print

        logging.info(f"Initializing faster_whisper main transcription model {self.model_path}")
        try:
            model = faster_whisper.WhisperModel(
                model_size_or_path=self.model_path,
                device=self.device,
                compute_type=self.compute_type,
                device_index=self.gpu_device_index,
            )
        except Exception as e:
            logging.exception(f"Error initializing main faster_whisper transcription model: {e}")
            raise

        self.ready_event.set()
        logging.debug("Faster_whisper main speech to text transcription model initialized successfully")

        # Start the polling thread
        polling_thread = threading.Thread(target=self.poll_connection)
        polling_thread.start()

        try:
            while not self.shutdown_event.is_set():
                try:
                    audio, language = self.queue.get(timeout=0.1)
                    try:
                        segments, info = model.transcribe(
                            audio,
                            language=language if language else None,
                            beam_size=self.beam_size,
                            initial_prompt=self.initial_prompt,
                            suppress_tokens=self.suppress_tokens
                        )
                        transcription = " ".join(seg.text for seg in segments).strip()
                        logging.debug(f"Final text detected with main model: {transcription}")
                        self.conn.send(('success', (transcription, info)))
                    except Exception as e:
                        logging.error(f"General error in transcription: {e}")
                        self.conn.send(('error', str(e)))
                except queue.Empty:
                    continue
                except KeyboardInterrupt:
                    self.interrupt_stop_event.set()
                    logging.debug("Transcription worker process finished due to KeyboardInterrupt")
                    break
                except Exception as e:
                    logging.error(f"General error in processing queue item: {e}")
        finally:
            __builtins__['print'] = print  # Restore the original print function
            self.conn.close()
            self.stdout_pipe.close()
            self.shutdown_event.set()  # Ensure the polling thread will stop
            polling_thread.join()  # Wait for the polling thread to finish
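
# Pipe protocol sketch (illustrative, derived from TranscriptionWorker.run
# above): the parent end sends (audio, language) tuples and receives either
# ('success', (transcription, info)) or ('error', message) back:
#
#     parent_conn.send((audio_float32, "en"))
#     status, result = parent_conn.recv()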

class bcolors:
    OKGREEN = '\033[92m'  # Green for active speech detection
    WARNING = '\033[93m'  # Yellow for silence detection
    ENDC = '\033[0m'      # Reset to default color

class AudioToTextRecorder:
    """
    A class responsible for capturing audio from the microphone, detecting
    voice activity, and then transcribing the captured audio using the
    `faster_whisper` model.
    """

    def __init__(self,
                 model: str = INIT_MODEL_TRANSCRIPTION,
                 language: str = "",
                 compute_type: str = "default",
                 input_device_index: int = None,
                 gpu_device_index: Union[int, List[int]] = 0,
                 device: str = "cuda",
                 on_recording_start=None,
                 on_recording_stop=None,
                 on_transcription_start=None,
                 ensure_sentence_starting_uppercase=True,
                 ensure_sentence_ends_with_period=True,
                 use_microphone=True,
                 spinner=True,
                 level=logging.WARNING,
                 init_logging=True,

                 # Realtime transcription parameters
                 enable_realtime_transcription=False,
                 use_main_model_for_realtime=False,
                 realtime_model_type=INIT_MODEL_TRANSCRIPTION_REALTIME,
                 realtime_processing_pause=INIT_REALTIME_PROCESSING_PAUSE,
                 init_realtime_after_seconds=INIT_REALTIME_INITIAL_PAUSE,
                 on_realtime_transcription_update=None,
                 on_realtime_transcription_stabilized=None,

                 # Voice activation parameters
                 silero_sensitivity: float = INIT_SILERO_SENSITIVITY,
                 silero_use_onnx: bool = False,
                 silero_deactivity_detection: bool = False,
                 webrtc_sensitivity: int = INIT_WEBRTC_SENSITIVITY,
                 post_speech_silence_duration: float = INIT_POST_SPEECH_SILENCE_DURATION,
                 min_length_of_recording: float = INIT_MIN_LENGTH_OF_RECORDING,
                 min_gap_between_recordings: float = INIT_MIN_GAP_BETWEEN_RECORDINGS,
                 pre_recording_buffer_duration: float = INIT_PRE_RECORDING_BUFFER_DURATION,
                 on_vad_detect_start=None,
                 on_vad_detect_stop=None,
                 on_recorded_chunk=None,
                 debug_mode=False,
                 handle_buffer_overflow: bool = INIT_HANDLE_BUFFER_OVERFLOW,
                 beam_size: int = 5,
                 beam_size_realtime: int = 3,
                 buffer_size: int = BUFFER_SIZE,
                 sample_rate: int = SAMPLE_RATE,
                 initial_prompt: Optional[Union[str, Iterable[int]]] = None,
                 suppress_tokens: Optional[List[int]] = [-1],
                 print_transcription_time: bool = False,
                 early_transcription_on_silence: int = 0,
                 allowed_latency_limit: int = ALLOWED_LATENCY_LIMIT,
                 no_log_file: bool = False,
                 use_extended_logging: bool = False,
                 ):
  218. """
  219. Initializes an audio recorder and transcription.
  220. Args:
  221. - model (str, default="tiny"): Specifies the size of the transcription
  222. model to use or the path to a converted model directory.
  223. Valid options are 'tiny', 'tiny.en', 'base', 'base.en',
  224. 'small', 'small.en', 'medium', 'medium.en', 'large-v1',
  225. 'large-v2'.
  226. If a specific size is provided, the model is downloaded
  227. from the Hugging Face Hub.
  228. - language (str, default=""): Language code for speech-to-text engine.
  229. If not specified, the model will attempt to detect the language
  230. automatically.
  231. - compute_type (str, default="default"): Specifies the type of
  232. computation to be used for transcription.
  233. See https://opennmt.net/CTranslate2/quantization.html.
  234. - input_device_index (int, default=0): The index of the audio input
  235. device to use.
  236. - gpu_device_index (int, default=0): Device ID to use.
  237. The model can also be loaded on multiple GPUs by passing a list of
  238. IDs (e.g. [0, 1, 2, 3]). In that case, multiple transcriptions can
  239. run in parallel when transcribe() is called from multiple Python
  240. threads
  241. - device (str, default="cuda"): Device for model to use. Can either be
  242. "cuda" or "cpu".
  243. - on_recording_start (callable, default=None): Callback function to be
  244. called when recording of audio to be transcripted starts.
  245. - on_recording_stop (callable, default=None): Callback function to be
  246. called when recording of audio to be transcripted stops.
  247. - on_transcription_start (callable, default=None): Callback function
  248. to be called when transcription of audio to text starts.
  249. - ensure_sentence_starting_uppercase (bool, default=True): Ensures
  250. that every sentence detected by the algorithm starts with an
  251. uppercase letter.
  252. - ensure_sentence_ends_with_period (bool, default=True): Ensures that
  253. every sentence that doesn't end with punctuation such as "?", "!"
  254. ends with a period
  255. - use_microphone (bool, default=True): Specifies whether to use the
  256. microphone as the audio input source. If set to False, the
  257. audio input source will be the audio data sent through the
  258. feed_audio() method.
  259. - spinner (bool, default=True): Show spinner animation with current
  260. state.
  261. - level (int, default=logging.WARNING): Logging level.
  262. - init_logging (bool, default=True): Whether to initialize
  263. the logging framework. Set to False to manage this yourself.
  264. - enable_realtime_transcription (bool, default=False): Enables or
  265. disables real-time transcription of audio. When set to True, the
  266. audio will be transcribed continuously as it is being recorded.
  267. - use_main_model_for_realtime (str, default=False):
  268. If True, use the main transcription model for both regular and
  269. real-time transcription. If False, use a separate model specified
  270. by realtime_model_type for real-time transcription.
  271. Using a single model can save memory and potentially improve
  272. performance, but may not be optimized for real-time processing.
  273. Using separate models allows for a smaller, faster model for
  274. real-time transcription while keeping a more accurate model for
  275. final transcription.
  276. - realtime_model_type (str, default="tiny"): Specifies the machine
  277. learning model to be used for real-time transcription. Valid
  278. options include 'tiny', 'tiny.en', 'base', 'base.en', 'small',
  279. 'small.en', 'medium', 'medium.en', 'large-v1', 'large-v2'.
  280. - realtime_processing_pause (float, default=0.1): Specifies the time
  281. interval in seconds after a chunk of audio gets transcribed. Lower
  282. values will result in more "real-time" (frequent) transcription
  283. updates but may increase computational load.
  284. - init_realtime_after_seconds (float, default=0.2): Specifies the
  285. initial waiting time after the recording was initiated before
  286. yielding the first realtime transcription
  287. - on_realtime_transcription_update = A callback function that is
  288. triggered whenever there's an update in the real-time
  289. transcription. The function is called with the newly transcribed
  290. text as its argument.
  291. - on_realtime_transcription_stabilized = A callback function that is
  292. triggered when the transcribed text stabilizes in quality. The
  293. stabilized text is generally more accurate but may arrive with a
  294. slight delay compared to the regular real-time updates.
  295. - silero_sensitivity (float, default=SILERO_SENSITIVITY): Sensitivity
  296. for the Silero Voice Activity Detection model ranging from 0
  297. (least sensitive) to 1 (most sensitive). Default is 0.5.
  298. - silero_use_onnx (bool, default=False): Enables usage of the
  299. pre-trained model from Silero in the ONNX (Open Neural Network
  300. Exchange) format instead of the PyTorch format. This is
  301. recommended for faster performance.
  302. - silero_deactivity_detection (bool, default=False): Enables the Silero
  303. model for end-of-speech detection. More robust against background
  304. noise. Utilizes additional GPU resources but improves accuracy in
  305. noisy environments. When False, uses the default WebRTC VAD,
  306. which is more sensitive but may continue recording longer due
  307. to background sounds.
  308. - webrtc_sensitivity (int, default=WEBRTC_SENSITIVITY): Sensitivity
  309. for the WebRTC Voice Activity Detection engine ranging from 0
  310. (least aggressive / most sensitive) to 3 (most aggressive,
  311. least sensitive). Default is 3.
  312. - post_speech_silence_duration (float, default=0.2): Duration in
  313. seconds of silence that must follow speech before the recording
  314. is considered to be completed. This ensures that any brief
  315. pauses during speech don't prematurely end the recording.
  316. - min_gap_between_recordings (float, default=1.0): Specifies the
  317. minimum time interval in seconds that should exist between the
  318. end of one recording session and the beginning of another to
  319. prevent rapid consecutive recordings.
  320. - min_length_of_recording (float, default=1.0): Specifies the minimum
  321. duration in seconds that a recording session should last to ensure
  322. meaningful audio capture, preventing excessively short or
  323. fragmented recordings.
  324. - pre_recording_buffer_duration (float, default=0.2): Duration in
  325. seconds for the audio buffer to maintain pre-roll audio
  326. (compensates speech activity detection latency)
  327. - on_vad_detect_start (callable, default=None): Callback function to
  328. be called when the system listens for voice activity.
  329. - on_vad_detect_stop (callable, default=None): Callback function to be
  330. called when the system stops listening for voice activity.
  331. - on_recorded_chunk (callable, default=None): Callback function to be
  332. called when a chunk of audio is recorded. The function is called
  333. with the recorded audio chunk as its argument.
  334. - debug_mode (bool, default=False): If set to True, the system will
  335. print additional debug information to the console.
  336. - handle_buffer_overflow (bool, default=True): If set to True, the system
  337. will log a warning when an input overflow occurs during recording and
  338. remove the data from the buffer.
  339. - beam_size (int, default=5): The beam size to use for beam search
  340. decoding.
  341. - beam_size_realtime (int, default=3): The beam size to use for beam
  342. search decoding in the real-time transcription model.
  343. - buffer_size (int, default=512): The buffer size to use for audio
  344. recording. Changing this may break functionality.
  345. - sample_rate (int, default=16000): The sample rate to use for audio
  346. recording. Changing this will very probably functionality (as the
  347. WebRTC VAD model is very sensitive towards the sample rate).
  348. - initial_prompt (str or iterable of int, default=None): Initial
  349. prompt to be fed to the transcription models.
  350. - suppress_tokens (list of int, default=[-1]): Tokens to be suppressed
  351. from the transcription output.
  352. - print_transcription_time (bool, default=False): Logs processing time
  353. of main model transcription
  354. - early_transcription_on_silence (int, default=0): If set, the
  355. system will transcribe audio faster when silence is detected.
  356. Transcription will start after the specified milliseconds, so
  357. keep this value lower than post_speech_silence_duration.
  358. Ideally around post_speech_silence_duration minus the estimated
  359. transcription time with the main model.
  360. If silence lasts longer than post_speech_silence_duration, the
  361. recording is stopped, and the transcription is submitted. If
  362. voice activity resumes within this period, the transcription
  363. is discarded. Results in faster final transcriptions to the cost
  364. of additional GPU load due to some unnecessary final transcriptions.
  365. - allowed_latency_limit (int, default=100): Maximal amount of chunks
  366. that can be unprocessed in queue before discarding chunks.
  367. - no_log_file (bool, default=False): Skips writing of debug log file.
  368. - use_extended_logging (bool, default=False): Writes extensive
  369. log messages for the recording worker, that processes the audio
  370. chunks.
  371. Raises:
  372. Exception: Errors related to initializing transcription
  373. model or audio recording.
  374. """
        self.language = language
        self.compute_type = compute_type
        self.input_device_index = input_device_index
        self.gpu_device_index = gpu_device_index
        self.device = device
        self.ensure_sentence_starting_uppercase = (
            ensure_sentence_starting_uppercase
        )
        self.ensure_sentence_ends_with_period = (
            ensure_sentence_ends_with_period
        )
        self.use_microphone = mp.Value(c_bool, use_microphone)
        self.min_gap_between_recordings = min_gap_between_recordings
        self.min_length_of_recording = min_length_of_recording
        self.pre_recording_buffer_duration = pre_recording_buffer_duration
        self.post_speech_silence_duration = post_speech_silence_duration
        self.on_recording_start = on_recording_start
        self.on_recording_stop = on_recording_stop
        self.on_vad_detect_start = on_vad_detect_start
        self.on_vad_detect_stop = on_vad_detect_stop
        self.on_recorded_chunk = on_recorded_chunk
        self.on_transcription_start = on_transcription_start
        self.enable_realtime_transcription = enable_realtime_transcription
        self.use_main_model_for_realtime = use_main_model_for_realtime
        self.main_model_type = model
        self.realtime_model_type = realtime_model_type
        self.realtime_processing_pause = realtime_processing_pause
        self.init_realtime_after_seconds = init_realtime_after_seconds
        self.on_realtime_transcription_update = (
            on_realtime_transcription_update
        )
        self.on_realtime_transcription_stabilized = (
            on_realtime_transcription_stabilized
        )
        self.debug_mode = debug_mode
        self.handle_buffer_overflow = handle_buffer_overflow
        self.beam_size = beam_size
        self.beam_size_realtime = beam_size_realtime
        self.allowed_latency_limit = allowed_latency_limit

        self.level = level
        self.audio_queue = mp.Queue()
        self.buffer_size = buffer_size
        self.sample_rate = sample_rate
        self.recording_start_time = 0
        self.recording_stop_time = 0
        self.silero_check_time = 0
        self.silero_working = False
        self.speech_end_silence_start = 0
        self.silero_sensitivity = silero_sensitivity
        self.silero_deactivity_detection = silero_deactivity_detection
        self.listen_start = 0
        self.spinner = spinner
        self.halo = None
        self.state = "inactive"
        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.is_webrtc_speech_active = False
        self.is_silero_speech_active = False
        self.recording_thread = None
        self.realtime_thread = None
        self.audio_interface = None
        self.audio = None
        self.stream = None
        self.start_recording_event = threading.Event()
        self.stop_recording_event = threading.Event()
        self.last_transcription_bytes = None
        self.last_transcription_bytes_b64 = None
        self.initial_prompt = initial_prompt
        self.suppress_tokens = suppress_tokens
        self.detected_language = None
        self.detected_language_probability = 0
        self.detected_realtime_language = None
        self.detected_realtime_language_probability = 0
        self.transcription_lock = threading.Lock()
        self.shutdown_lock = threading.Lock()
        self.transcribe_count = 0
        self.print_transcription_time = print_transcription_time
        self.early_transcription_on_silence = early_transcription_on_silence
        self.use_extended_logging = use_extended_logging

        if init_logging:
            # Initialize the logging configuration with the specified level
            log_format = 'RealTimeSTT: %(name)s - %(levelname)s - %(message)s'

            # Adjust file_log_format to include milliseconds
            file_log_format = '%(asctime)s.%(msecs)03d - ' + log_format

            # Get the root logger
            logger = logging.getLogger()
            logger.setLevel(logging.DEBUG)  # Set the root logger's level to DEBUG

            # Remove any existing handlers
            logger.handlers = []

            # Create a console handler and set its level
            console_handler = logging.StreamHandler()
            console_handler.setLevel(level)
            console_handler.setFormatter(logging.Formatter(log_format))

            # Add the handlers to the logger
            if not no_log_file:
                # Create a file handler and set its level
                file_handler = logging.FileHandler('realtimesst.log')
                file_handler.setLevel(logging.DEBUG)
                file_handler.setFormatter(logging.Formatter(
                    file_log_format,
                    datefmt='%Y-%m-%d %H:%M:%S'
                ))
                logger.addHandler(file_handler)
            logger.addHandler(console_handler)

        self.is_shut_down = False
        self.shutdown_event = mp.Event()

        try:
            # Only set the start method if it hasn't been set already
            if mp.get_start_method(allow_none=True) is None:
                mp.set_start_method("spawn")
        except RuntimeError as e:
            logging.info(f"Start method has already been set. Details: {e}")

        logging.info("Starting RealTimeSTT")

        if use_extended_logging:
            logging.info("RealtimeSTT was called with these parameters:")
            for param, value in locals().items():
                logging.info(f"{param}: {value}")

        self.interrupt_stop_event = mp.Event()
        self.was_interrupted = mp.Event()
        self.main_transcription_ready_event = mp.Event()
        self.parent_transcription_pipe, child_transcription_pipe = mp.Pipe()
        self.parent_stdout_pipe, child_stdout_pipe = mp.Pipe()

        # Set device for model
        self.device = "cuda" if self.device == "cuda" and torch.cuda.is_available() else "cpu"

        self.transcript_process = self._start_thread(
            target=AudioToTextRecorder._transcription_worker,
            args=(
                child_transcription_pipe,
                child_stdout_pipe,
                model,
                self.compute_type,
                self.gpu_device_index,
                self.device,
                self.main_transcription_ready_event,
                self.shutdown_event,
                self.interrupt_stop_event,
                self.beam_size,
                self.initial_prompt,
                self.suppress_tokens
            )
        )

        # Start audio data reading process
        if self.use_microphone.value:
            logging.info("Initializing audio recording"
                         " (creating pyAudio input stream,"
                         f" sample rate: {self.sample_rate}"
                         f" buffer size: {self.buffer_size}"
                         )
            self.reader_process = self._start_thread(
                target=AudioToTextRecorder._audio_data_worker,
                args=(
                    self.audio_queue,
                    self.sample_rate,
                    self.buffer_size,
                    self.input_device_index,
                    self.shutdown_event,
                    self.interrupt_stop_event,
                    self.use_microphone
                )
            )

        # Initialize the realtime transcription model
        if self.enable_realtime_transcription and not self.use_main_model_for_realtime:
            try:
                logging.info("Initializing faster_whisper realtime "
                             f"transcription model {self.realtime_model_type}"
                             )
                self.realtime_model_type = faster_whisper.WhisperModel(
                    model_size_or_path=self.realtime_model_type,
                    device=self.device,
                    compute_type=self.compute_type,
                    device_index=self.gpu_device_index
                )
            except Exception as e:
                logging.exception("Error initializing faster_whisper "
                                  f"realtime transcription model: {e}"
                                  )
                raise

            logging.debug("Faster_whisper realtime speech to text "
                          "transcription model initialized successfully")

        # Setup voice activity detection model WebRTC
        try:
            logging.info("Initializing WebRTC voice with "
                         f"Sensitivity {webrtc_sensitivity}"
                         )
            self.webrtc_vad_model = webrtcvad.Vad()
            self.webrtc_vad_model.set_mode(webrtc_sensitivity)
        except Exception as e:
            logging.exception("Error initializing WebRTC voice "
                              f"activity detection engine: {e}"
                              )
            raise

        logging.debug("WebRTC VAD voice activity detection "
                      "engine initialized successfully"
                      )

        # Setup voice activity detection model Silero VAD
        try:
            self.silero_vad_model, _ = torch.hub.load(
                repo_or_dir="snakers4/silero-vad",
                model="silero_vad",
                verbose=False,
                onnx=silero_use_onnx
            )
        except Exception as e:
            logging.exception(f"Error initializing Silero VAD "
                              f"voice activity detection engine: {e}"
                              )
            raise

        logging.debug("Silero VAD voice activity detection "
                      "engine initialized successfully"
                      )

        self.audio_buffer = collections.deque(
            maxlen=int((self.sample_rate // self.buffer_size) *
                       self.pre_recording_buffer_duration)
        )
        self.last_words_buffer = collections.deque(
            maxlen=int((self.sample_rate // self.buffer_size) *
                       0.3)
        )
        self.frames = []
        self.new_frames = mp.Event()
        self.new_frames.set()

        # Recording control flags
        self.is_recording = False
        self.is_running = True
        self.start_recording_on_voice_activity = False
        self.stop_recording_on_voice_deactivity = False

        # Start the recording worker thread
        self.recording_thread = threading.Thread(target=self._recording_worker)
        self.recording_thread.daemon = True
        self.recording_thread.start()

        # Start the realtime transcription worker thread
        self.realtime_thread = threading.Thread(target=self._realtime_worker)
        self.realtime_thread.daemon = True
        self.realtime_thread.start()

        # Wait for transcription models to start
        logging.debug('Waiting for main transcription model to start')
        self.main_transcription_ready_event.wait()
        logging.debug('Main transcription model ready')

        self.stdout_thread = threading.Thread(target=self._read_stdout)
        self.stdout_thread.daemon = True
        self.stdout_thread.start()

        logging.debug('RealtimeSTT initialization completed successfully')

    def _start_thread(self, target=None, args=()):
        """
        Implement a consistent threading model across the library.

        This method is used to start any thread in this library. It uses the
        standard threading.Thread on Linux and, for all other platforms, the
        PyTorch multiprocessing 'Process' instead.

        Args:
            target (callable object): is the callable object to be invoked by
              the run() method. Defaults to None, meaning nothing is called.
            args (tuple): is a list or tuple of arguments for the target
              invocation. Defaults to ().
        """
        if platform.system() == 'Linux':
            thread = threading.Thread(target=target, args=args)
            thread.daemon = True
            thread.start()
            return thread
        else:
            thread = mp.Process(target=target, args=args)
            thread.start()
            return thread

    def _read_stdout(self):
        while not self.shutdown_event.is_set():
            try:
                if self.parent_stdout_pipe.poll(0.1):
                    logging.debug("Receive from stdout pipe")
                    message = self.parent_stdout_pipe.recv()
                    logging.info(message)
            except (BrokenPipeError, EOFError, OSError):
                # The pipe probably has been closed, so we ignore the error
                pass
            except KeyboardInterrupt:  # handle manual interruption (Ctrl+C)
                logging.info("KeyboardInterrupt in read from stdout detected, exiting...")
                break
            except Exception as e:
                logging.error(f"Unexpected error in read from stdout: {e}")
                logging.error(traceback.format_exc())  # Log the full traceback here
                break
            time.sleep(0.1)

    def _transcription_worker(*args, **kwargs):
        worker = TranscriptionWorker(*args, **kwargs)
        worker.run()

    @staticmethod
    def _audio_data_worker(audio_queue,
                           target_sample_rate,
                           buffer_size,
                           input_device_index,
                           shutdown_event,
                           interrupt_stop_event,
                           use_microphone):
        """
        Worker method that handles the audio recording process.

        This method runs in a separate process and is responsible for:
        - Setting up the audio input stream for recording at the highest possible sample rate.
        - Continuously reading audio data from the input stream, resampling if necessary,
          preprocessing the data, and placing complete chunks in a queue.
        - Handling errors during the recording process.
        - Gracefully terminating the recording process when a shutdown event is set.

        Args:
            audio_queue (queue.Queue): A queue where recorded audio data is placed.
            target_sample_rate (int): The desired sample rate for the output audio (for Silero VAD).
            buffer_size (int): The number of samples expected by the Silero VAD model.
            input_device_index (int): The index of the audio input device.
            shutdown_event (threading.Event): An event that, when set, signals this worker method to terminate.
            interrupt_stop_event (threading.Event): An event to signal keyboard interrupt.
            use_microphone (multiprocessing.Value): A shared value indicating whether to use the microphone.

        Raises:
            Exception: If there is an error while initializing the audio recording.
        """
        import pyaudio
        import numpy as np
        from scipy import signal

        if __name__ == '__main__':
            system_signal.signal(system_signal.SIGINT, system_signal.SIG_IGN)

        def get_highest_sample_rate(audio_interface, device_index):
            """Get the highest supported sample rate for the specified device."""
            try:
                device_info = audio_interface.get_device_info_by_index(device_index)
                max_rate = int(device_info['defaultSampleRate'])

                if 'supportedSampleRates' in device_info:
                    supported_rates = [int(rate) for rate in device_info['supportedSampleRates']]
                    if supported_rates:
                        max_rate = max(supported_rates)

                return max_rate
            except Exception as e:
                logging.warning(f"Failed to get highest sample rate: {e}")
                return 48000  # Fallback to a common high sample rate

        def initialize_audio_stream(audio_interface, sample_rate, chunk_size):
            """Initialize the audio stream with error handling."""
            nonlocal input_device_index

            def validate_device(device_index):
                """Validate that the device exists and is actually available for input."""
                try:
                    device_info = audio_interface.get_device_info_by_index(device_index)
                    if not device_info.get('maxInputChannels', 0) > 0:
                        return False

                    # Try to actually read from the device
                    test_stream = audio_interface.open(
                        format=pyaudio.paInt16,
                        channels=1,
                        rate=target_sample_rate,
                        input=True,
                        frames_per_buffer=chunk_size,
                        input_device_index=device_index,
                        start=False  # Don't start the stream yet
                    )

                    # Start the stream and try to read from it
                    test_stream.start_stream()
                    test_data = test_stream.read(chunk_size, exception_on_overflow=False)
                    test_stream.stop_stream()
                    test_stream.close()

                    # Check if we got valid data
                    if len(test_data) == 0:
                        return False

                    return True
                except Exception as e:
                    logging.debug(f"Device validation failed: {e}")
                    return False

            while not shutdown_event.is_set():
                try:
                    # First, get a list of all available input devices
                    input_devices = []
                    for i in range(audio_interface.get_device_count()):
                        try:
                            device_info = audio_interface.get_device_info_by_index(i)
                            if device_info.get('maxInputChannels', 0) > 0:
                                input_devices.append(i)
                        except Exception:
                            continue

                    if not input_devices:
                        raise Exception("No input devices found")

                    # If input_device_index is None or invalid, try to find a working device
                    if input_device_index is None or input_device_index not in input_devices:
                        # First try the default device
                        try:
                            default_device = audio_interface.get_default_input_device_info()
                            if validate_device(default_device['index']):
                                input_device_index = default_device['index']
                        except Exception:
                            # If default device fails, try other available input devices
                            for device_index in input_devices:
                                if validate_device(device_index):
                                    input_device_index = device_index
                                    break
                            else:
                                raise Exception("No working input devices found")

                    # Validate the selected device one final time
                    if not validate_device(input_device_index):
                        raise Exception("Selected device validation failed")

                    # If we get here, we have a validated device
                    stream = audio_interface.open(
                        format=pyaudio.paInt16,
                        channels=1,
                        rate=sample_rate,
                        input=True,
                        frames_per_buffer=chunk_size,
                        input_device_index=input_device_index,
                    )
                    logging.info(f"Microphone connected and validated (input_device_index: {input_device_index})")
                    return stream

                except Exception as e:
                    logging.error(f"Microphone connection failed: {e}. Retrying...")
                    input_device_index = None
                    time.sleep(3)  # Wait before retrying
                    continue

        def preprocess_audio(chunk, original_sample_rate, target_sample_rate):
            """Preprocess audio chunk similar to feed_audio method."""
            if isinstance(chunk, np.ndarray):
                # Handle stereo to mono conversion if necessary
                if chunk.ndim == 2:
                    chunk = np.mean(chunk, axis=1)

                # Resample to target_sample_rate if necessary
                if original_sample_rate != target_sample_rate:
                    num_samples = int(len(chunk) * target_sample_rate / original_sample_rate)
                    chunk = signal.resample(chunk, num_samples)

                # Ensure data type is int16
                chunk = chunk.astype(np.int16)
            else:
                # If chunk is bytes, convert to numpy array
                chunk = np.frombuffer(chunk, dtype=np.int16)

                # Resample if necessary
                if original_sample_rate != target_sample_rate:
                    num_samples = int(len(chunk) * target_sample_rate / original_sample_rate)
                    chunk = signal.resample(chunk, num_samples)
                    chunk = chunk.astype(np.int16)

            return chunk.tobytes()

        audio_interface = None
        stream = None
        device_sample_rate = None
        chunk_size = 1024  # Increased chunk size for better performance

        def setup_audio():
            nonlocal audio_interface, stream, device_sample_rate, input_device_index
            try:
                if audio_interface is None:
                    audio_interface = pyaudio.PyAudio()

                if input_device_index is None:
                    try:
                        default_device = audio_interface.get_default_input_device_info()
                        input_device_index = default_device['index']
                    except OSError:
                        input_device_index = None

                sample_rates_to_try = [16000]  # Try 16000 Hz first
                if input_device_index is not None:
                    highest_rate = get_highest_sample_rate(audio_interface, input_device_index)
                    if highest_rate != 16000:
                        sample_rates_to_try.append(highest_rate)
                else:
                    sample_rates_to_try.append(48000)  # Fallback sample rate

                for rate in sample_rates_to_try:
                    try:
                        device_sample_rate = rate
                        stream = initialize_audio_stream(audio_interface, device_sample_rate, chunk_size)
                        if stream is not None:
                            logging.debug(f"Audio recording initialized successfully at {device_sample_rate} Hz, reading {chunk_size} frames at a time")
                            return True
                    except Exception as e:
                        logging.warning(f"Failed to initialize audio stream at {device_sample_rate} Hz: {e}")
                        continue

                # If we reach here, none of the sample rates worked
                raise Exception("Failed to initialize audio stream with all sample rates.")
            except Exception as e:
                logging.exception(f"Error initializing pyaudio audio recording: {e}")
                if audio_interface:
                    audio_interface.terminate()
                return False

        if not setup_audio():
            raise Exception("Failed to set up audio recording.")

        buffer = bytearray()
        silero_buffer_size = 2 * buffer_size  # silero complains if too short
        time_since_last_buffer_message = 0

        try:
            while not shutdown_event.is_set():
                try:
                    data = stream.read(chunk_size, exception_on_overflow=False)

                    if use_microphone.value:
                        processed_data = preprocess_audio(data, device_sample_rate, target_sample_rate)
                        buffer += processed_data

                        # Check if the buffer has reached or exceeded the silero_buffer_size
                        while len(buffer) >= silero_buffer_size:
                            # Extract silero_buffer_size amount of data from the buffer
                            to_process = buffer[:silero_buffer_size]
                            buffer = buffer[silero_buffer_size:]

                            # Feed the extracted data to the audio_queue
                            if time_since_last_buffer_message:
                                time_passed = time.time() - time_since_last_buffer_message
                                if time_passed > 1:
                                    logging.debug("_audio_data_worker writing audio data into queue.")
                                    time_since_last_buffer_message = time.time()
                            else:
                                time_since_last_buffer_message = time.time()

                            audio_queue.put(to_process)

                except OSError as e:
                    if e.errno == pyaudio.paInputOverflowed:
                        logging.warning("Input overflowed. Frame dropped.")
                    else:
                        logging.error(f"OSError during recording: {e}")
                        # Attempt to reinitialize the stream
                        logging.error("Attempting to reinitialize the audio stream...")

                        try:
                            if stream:
                                stream.stop_stream()
                                stream.close()
                        except Exception:
                            pass

                        # Wait a bit before trying to reinitialize
                        time.sleep(1)

                        if not setup_audio():
                            logging.error("Failed to reinitialize audio stream. Exiting.")
                            break
                        else:
                            logging.error("Audio stream reinitialized successfully.")
                    continue

                except Exception as e:
                    logging.error(f"Unknown error during recording: {e}")
                    tb_str = traceback.format_exc()
                    logging.error(f"Traceback: {tb_str}")
                    logging.error(f"Error: {e}")
                    # Attempt to reinitialize the stream
                    logging.info("Attempting to reinitialize the audio stream...")

                    try:
                        if stream:
                            stream.stop_stream()
                            stream.close()
                    except Exception:
                        pass

                    # Wait a bit before trying to reinitialize
                    time.sleep(1)

                    if not setup_audio():
                        logging.error("Failed to reinitialize audio stream. Exiting.")
                        break
                    else:
                        logging.info("Audio stream reinitialized successfully.")
                    continue

        except KeyboardInterrupt:
            interrupt_stop_event.set()
            logging.debug("Audio data worker process finished due to KeyboardInterrupt")
        finally:
            # After recording stops, feed any remaining audio data
            if buffer:
                audio_queue.put(bytes(buffer))

            try:
                if stream:
                    stream.stop_stream()
                    stream.close()
            except Exception:
                pass
            if audio_interface:
                audio_interface.terminate()
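
    # Resampling arithmetic used by preprocess_audio above (illustrative):
    # a 1024-frame chunk read from a 48 kHz device yields
    # int(1024 * 16000 / 48000) = 341 samples at the 16 kHz target rate
    # before being sliced into silero_buffer_size pieces for the queue.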

    def abort(self):
        self.start_recording_on_voice_activity = False
        self.stop_recording_on_voice_deactivity = False
        self._set_state("inactive")
        self.interrupt_stop_event.set()
        self.was_interrupted.wait()
        self.was_interrupted.clear()

    def wait_audio(self):
        """
        Waits for the start and completion of the audio recording process.

        This method is responsible for:
        - Waiting for voice activity to begin recording if not yet started.
        - Waiting for voice inactivity to complete the recording.
        - Setting the audio buffer from the recorded frames.
        - Resetting recording-related attributes.

        Side effects:
        - Updates the state of the instance.
        - Modifies the audio attribute to contain the processed audio data.
        """
        try:
            logging.info("Setting listen time")
            if self.listen_start == 0:
                self.listen_start = time.time()

            # If not yet started recording, wait for voice activity to initiate.
            if not self.is_recording and not self.frames:
                self._set_state("listening")
                self.start_recording_on_voice_activity = True

                # Wait until recording starts
                logging.debug('Waiting for recording start')
                while not self.interrupt_stop_event.is_set():
                    if self.start_recording_event.wait(timeout=0.02):
                        break

            # If recording is ongoing, wait for voice inactivity
            # to finish recording.
            if self.is_recording:
                self.stop_recording_on_voice_deactivity = True

                # Wait until recording stops
                logging.debug('Waiting for recording stop')
                while not self.interrupt_stop_event.is_set():
                    if self.stop_recording_event.wait(timeout=0.02):
                        break

            # Convert recorded frames to the appropriate audio format.
            audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)
            self.audio = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE
            self.frames.clear()
            self.new_frames.set()

            # Reset recording-related timestamps
            self.recording_stop_time = 0
            self.listen_start = 0

            self._set_state("inactive")

        except KeyboardInterrupt:
            logging.info("KeyboardInterrupt in wait_audio, shutting down")
            self.shutdown()
            raise  # Re-raise the exception after cleanup

    def transcribe(self):
        """
        Transcribes audio captured by this class instance using the
        `faster_whisper` model.

        - Automatically starts recording upon voice activity if not manually
          started using `recorder.start()`.
        - Automatically stops recording upon voice deactivity if not manually
          stopped with `recorder.stop()`.
        - Processes the recorded audio to generate transcription.

        Returns:
            str: The transcription of the recorded audio.

        Raises:
            Exception: If there is an error during the transcription process.
        """
        self._set_state("transcribing")
        audio_copy = copy.deepcopy(self.audio)
        start_time = 0
        with self.transcription_lock:
            try:
                if self.transcribe_count == 0:
                    logging.debug("Adding transcription request, no early transcription started")
                    start_time = time.time()  # Start timing
                    self.parent_transcription_pipe.send((audio_copy, self.language))
                    self.transcribe_count += 1

                while self.transcribe_count > 0:
                    logging.debug(f"Receive from parent_transcription_pipe after sending transcription request, transcribe_count: {self.transcribe_count}")
                    status, result = self.parent_transcription_pipe.recv()
                    self.transcribe_count -= 1

                self.allowed_to_early_transcribe = True
                self._set_state("inactive")
                if status == 'success':
                    segments, info = result
                    self.detected_language = info.language if info.language_probability > 0 else None
                    self.detected_language_probability = info.language_probability
                    self.last_transcription_bytes = copy.deepcopy(audio_copy)
                    self.last_transcription_bytes_b64 = base64.b64encode(self.last_transcription_bytes.tobytes()).decode('utf-8')
                    transcription = self._preprocess_output(segments)
                    end_time = time.time()  # End timing
                    transcription_time = end_time - start_time

                    if start_time:
                        if self.print_transcription_time:
                            print(f"Model {self.main_model_type} completed transcription in {transcription_time:.2f} seconds")
                        else:
                            logging.debug(f"Model {self.main_model_type} completed transcription in {transcription_time:.2f} seconds")
                    return transcription
                else:
                    logging.error(f"Transcription error: {result}")
                    raise Exception(result)
            except Exception as e:
                logging.error(f"Error during transcription: {str(e)}")
                raise e

    def text(self,
             on_transcription_finished=None,
             ):
        """
        Transcribes audio captured by this class instance
        using the `faster_whisper` model.

        - Automatically starts recording upon voice activity if not manually
          started using `recorder.start()`.
        - Automatically stops recording upon voice deactivity if not manually
          stopped with `recorder.stop()`.
        - Processes the recorded audio to generate transcription.

        Args:
            on_transcription_finished (callable, optional): Callback function
              to be executed when transcription is ready.
              If provided, transcription will be performed asynchronously, and
              the callback will receive the transcription as its argument.
              If omitted, the transcription will be performed synchronously,
              and the result will be returned.

        Returns (if no callback is set):
            str: The transcription of the recorded audio.
        """
        self.interrupt_stop_event.clear()
        self.was_interrupted.clear()
        try:
            self.wait_audio()
        except KeyboardInterrupt:
            logging.info("KeyboardInterrupt in text() method")
            self.shutdown()
            raise  # Re-raise the exception after cleanup

        if self.is_shut_down or self.interrupt_stop_event.is_set():
            if self.interrupt_stop_event.is_set():
                self.was_interrupted.set()
            return ""

        if on_transcription_finished:
            threading.Thread(target=on_transcription_finished,
                             args=(self.transcribe(),)).start()
        else:
            return self.transcribe()
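
    # Illustrative sketch of the asynchronous form of text() (assumes an
    # AudioToTextRecorder instance named `recorder`; the callback runs on a
    # separate thread once the transcription is ready):
    #
    #     def on_text(transcription):
    #         print("Heard:", transcription)
    #
    #     recorder.text(on_transcription_finished=on_text)  # returns immediately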

    def start(self):
        """
        Starts recording audio directly without waiting for voice activity.
        """
        # Ensure there's a minimum interval
        # between stopping and starting recording
        if (time.time() - self.recording_stop_time
                < self.min_gap_between_recordings):
            logging.info("Attempted to start recording "
                         "too soon after stopping."
                         )
            return self

        logging.info("recording started")
        self._set_state("recording")
        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.frames = []
        self.new_frames.set()
        self.is_recording = True

        self.recording_start_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False

        self.stop_recording_event.clear()
        self.start_recording_event.set()

        if self.on_recording_start:
            self.on_recording_start()

        return self

    def stop(self):
        """
        Stops recording audio.
        """
        # Ensure there's a minimum interval
        # between starting and stopping recording
        if (time.time() - self.recording_start_time
                < self.min_length_of_recording):
            logging.info("Attempted to stop recording "
                         "too soon after starting."
                         )
            return self

        logging.info("recording stopped")
        self.is_recording = False
        self.recording_stop_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False
        self.silero_check_time = 0
        self.start_recording_event.clear()
        self.stop_recording_event.set()

        if self.on_recording_stop:
            self.on_recording_stop()

        return self
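
    # Illustrative sketch of manual control, not part of this module: start()
    # and stop() bypass voice-activity triggering. Note the guards above:
    # calls are ignored if they violate min_gap_between_recordings or
    # min_length_of_recording.
    #
    #     recorder.start()          # begin capturing immediately
    #     time.sleep(2)             # ... speak ...
    #     recorder.stop()           # end the recording
    #     print(recorder.text())    # transcribe what was captured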

    def listen(self):
        """
        Puts recorder in "listen" state.
        The recorder now "listens" for voice activation.
        Once voice is detected we enter "recording" state.
        """
        self._set_state("listening")
        self.start_recording_on_voice_activity = True

    def feed_audio(self, chunk, original_sample_rate=16000):
        """
        Feed an audio chunk into the processing pipeline. Chunks are
        accumulated until the buffer size is reached, and then the
        accumulated data is fed into the audio_queue.
        """
        # Check if the buffer attribute exists; if not, initialize it
        if not hasattr(self, 'buffer'):
            self.buffer = bytearray()

        # Check if input is a NumPy array
        if isinstance(chunk, np.ndarray):
            # Handle stereo to mono conversion if necessary
            if chunk.ndim == 2:
                chunk = np.mean(chunk, axis=1)

            # Resample to 16000 Hz if necessary
            if original_sample_rate != 16000:
                num_samples = int(len(chunk) * 16000 / original_sample_rate)
                chunk = resample(chunk, num_samples)

            # Ensure data type is int16
            chunk = chunk.astype(np.int16)

            # Convert the NumPy array to bytes
            chunk = chunk.tobytes()

        # Append the chunk to the buffer
        self.buffer += chunk
        buf_size = 2 * self.buffer_size  # silero complains if too short

        # Check if the buffer has reached or exceeded buf_size
        while len(self.buffer) >= buf_size:
            # Extract a buf_size chunk of bytes from the buffer
            to_process = self.buffer[:buf_size]
            self.buffer = self.buffer[buf_size:]

            # Feed the extracted data to the audio_queue
            self.audio_queue.put(to_process)
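
    # Illustrative sketch (assumed setup, not part of this module): feeding
    # external audio instead of the microphone. Assumes the recorder was
    # created with use_microphone=False and that `samples` is mono int16.
    #
    #     import numpy as np
    #
    #     recorder = AudioToTextRecorder(use_microphone=False)
    #     samples = np.zeros(48000, dtype=np.int16)  # 1 s of silence at 48 kHz
    #     recorder.feed_audio(samples, original_sample_rate=48000)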

    def set_microphone(self, microphone_on=True):
        """
        Set the microphone on or off.
        """
        logging.info("Setting microphone to: " + str(microphone_on))
        self.use_microphone.value = microphone_on

    def shutdown(self):
        """
        Safely shuts down the audio recording by stopping the
        recording worker and closing the audio stream.
        """
        with self.shutdown_lock:
            if self.is_shut_down:
                return

            print("\033[91mRealtimeSTT shutting down\033[0m")
            # logging.debug("RealtimeSTT shutting down")

            self.is_shut_down = True
            self.start_recording_event.set()
            self.stop_recording_event.set()

            self.shutdown_event.set()
            self.is_recording = False
            self.is_running = False

            logging.debug('Finishing recording thread')
            if self.recording_thread:
                self.audio_queue.put(bytes(1))
                self.recording_thread.join()

            logging.debug('Terminating reader process')

            # Give it some time to finish the loop and cleanup.
            if self.use_microphone.value:
                self.reader_process.join(timeout=10)

                if self.reader_process.is_alive():
                    logging.warning("Reader process did not terminate "
                                    "in time. Terminating forcefully."
                                    )
                    self.reader_process.terminate()

            logging.debug('Terminating transcription process')
            self.transcript_process.join(timeout=10)

            if self.transcript_process.is_alive():
                logging.warning("Transcript process did not terminate "
                                "in time. Terminating forcefully."
                                )
                self.transcript_process.terminate()

            self.parent_transcription_pipe.close()

            logging.debug('Finishing realtime thread')
            if self.realtime_thread:
                self.realtime_thread.join()

            if self.enable_realtime_transcription:
                if self.realtime_model_type:
                    del self.realtime_model_type
                    self.realtime_model_type = None
            gc.collect()
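
    # Illustrative teardown sketch, not part of this module: the
    # shutdown_lock/is_shut_down guard above makes shutdown() idempotent, so
    # calling it from a finally block is safe even if the recorder was
    # already shut down elsewhere.
    #
    #     recorder = AudioToTextRecorder()
    #     try:
    #         print(recorder.text())
    #     finally:
    #         recorder.shutdown()  # safe to call more than once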

    def _recording_worker(self):
        """
        The main worker method which constantly monitors the audio
        input for voice activity and accordingly starts/stops the recording.
        """
        if self.use_extended_logging:
            logging.debug('Debug: Entering try block')

        last_inner_try_time = 0
        try:
            if self.use_extended_logging:
                logging.debug('Debug: Initializing variables')
            time_since_last_buffer_message = 0
            was_recording = False
            self.allowed_to_early_transcribe = True

            if self.use_extended_logging:
                logging.debug('Debug: Starting main loop')
            # Continuously monitor audio for voice activity
            while self.is_running:

                # if self.use_extended_logging:
                #     logging.debug('Debug: Entering inner try block')
                if last_inner_try_time:
                    last_processing_time = time.time() - last_inner_try_time
                    if last_processing_time > 0.1:
                        if self.use_extended_logging:
                            logging.warning('### WARNING: PROCESSING TOOK TOO LONG')
                last_inner_try_time = time.time()
                try:
                    # if self.use_extended_logging:
                    #     logging.debug('Debug: Trying to get data from audio queue')
                    try:
                        data = self.audio_queue.get(timeout=0.01)
                        self.last_words_buffer.append(data)
                    except queue.Empty:
                        # if self.use_extended_logging:
                        #     logging.debug('Debug: Queue is empty, checking if still running')
                        if not self.is_running:
                            if self.use_extended_logging:
                                logging.debug('Debug: Not running, breaking loop')
                            break
                        # if self.use_extended_logging:
                        #     logging.debug('Debug: Continuing to next iteration')
                        continue

                    if self.use_extended_logging:
                        logging.debug('Debug: Checking for on_recorded_chunk callback')
                    if self.on_recorded_chunk:
                        if self.use_extended_logging:
                            logging.debug('Debug: Calling on_recorded_chunk')
                        self.on_recorded_chunk(data)

                    if self.use_extended_logging:
                        logging.debug('Debug: Checking if handle_buffer_overflow is True')
                    if self.handle_buffer_overflow:
                        if self.use_extended_logging:
                            logging.debug('Debug: Handling buffer overflow')
                        # Handle queue overflow
                        if (self.audio_queue.qsize() >
                                self.allowed_latency_limit):
                            if self.use_extended_logging:
                                logging.debug('Debug: Queue size exceeds limit, logging warnings')
                            logging.warning("Audio queue size exceeds "
                                            "latency limit. Current size: "
                                            f"{self.audio_queue.qsize()}. "
                                            "Discarding old audio chunks."
                                            )

                        if self.use_extended_logging:
                            logging.debug('Debug: Discarding old chunks if necessary')
                        while (self.audio_queue.qsize() >
                                self.allowed_latency_limit):

                            data = self.audio_queue.get()

                except BrokenPipeError:
                    logging.error("BrokenPipeError _recording_worker")
                    self.is_running = False
                    break

                if self.use_extended_logging:
                    logging.debug('Debug: Updating time_since_last_buffer_message')
                # Throttled log that the worker is still processing audio data
                if time_since_last_buffer_message:
                    time_passed = time.time() - time_since_last_buffer_message
                    if time_passed > 1:
                        if self.use_extended_logging:
                            logging.debug("_recording_worker processing audio data")
                        time_since_last_buffer_message = time.time()
                else:
                    time_since_last_buffer_message = time.time()

                if self.use_extended_logging:
                    logging.debug('Debug: Initializing failed_stop_attempt')
                failed_stop_attempt = False

                if self.use_extended_logging:
                    logging.debug('Debug: Checking if not recording')
                if not self.is_recording:
                    if self.use_extended_logging:
                        logging.debug('Debug: Handling not recording state')

                    if self.use_extended_logging:
                        logging.debug('Debug: Setting state and spinner text')
                    # Set state and spinner text

                    if self.use_extended_logging:
                        logging.debug('Debug: Checking voice activity conditions')
                    # Check for voice activity to
                    # trigger the start of recording
                    if self.start_recording_on_voice_activity:
                        if self.use_extended_logging:
                            logging.debug('Debug: Checking if voice is active')
                        if self._is_voice_active():
                            if self.use_extended_logging:
                                logging.debug('Debug: Voice activity detected')
                            logging.info("voice activity detected")

                            if self.use_extended_logging:
                                logging.debug('Debug: Starting recording')
                            self.start()

                            self.start_recording_on_voice_activity = False

                            if self.use_extended_logging:
                                logging.debug('Debug: Adding buffered audio to frames')
                            # Add the buffered audio
                            # to the recording frames
                            self.frames.extend(list(self.audio_buffer))
                            self.new_frames.set()
                            self.audio_buffer.clear()

                            if self.use_extended_logging:
                                logging.debug('Debug: Resetting Silero VAD model states')
                            self.silero_vad_model.reset_states()
                        else:
                            if self.use_extended_logging:
                                logging.debug('Debug: Checking voice activity')
                            data_copy = data[:]
                            self._check_voice_activity(data_copy)

                    if self.use_extended_logging:
                        logging.debug('Debug: Resetting speech_end_silence_start')
                    self.speech_end_silence_start = 0

                else:
                    if self.use_extended_logging:
                        logging.debug('Debug: Handling recording state')

                    if self.use_extended_logging:
                        logging.debug('Debug: Checking if stop_recording_on_voice_deactivity is True')
                    # Stop the recording if silence is detected after speech
                    if self.stop_recording_on_voice_deactivity:
                        if self.use_extended_logging:
                            logging.debug('Debug: Determining if speech is detected')
                        is_speech = (
                            self._is_silero_speech(data) if self.silero_deactivity_detection
                            else self._is_webrtc_speech(data, True)
                        )

                        if self.use_extended_logging:
                            logging.debug('Debug: Formatting speech_end_silence_start')
                        if not self.speech_end_silence_start:
                            str_speech_end_silence_start = "0"
                        else:
                            str_speech_end_silence_start = datetime.datetime.fromtimestamp(self.speech_end_silence_start).strftime('%H:%M:%S.%f')[:-3]
                        if self.use_extended_logging:
                            logging.debug(f"is_speech: {is_speech}, str_speech_end_silence_start: {str_speech_end_silence_start}")

                        if self.use_extended_logging:
                            logging.debug('Debug: Checking if speech is not detected')
                        if not is_speech:
                            if self.use_extended_logging:
                                logging.debug('Debug: Handling voice deactivity')
                            # Voice deactivity was detected, so we start
                            # measuring silence time before stopping recording
                            if self.speech_end_silence_start == 0 and \
                                    (time.time() - self.recording_start_time > self.min_length_of_recording):
                                self.speech_end_silence_start = time.time()

                            if self.use_extended_logging:
                                logging.debug('Debug: Checking early transcription conditions')
                            if self.speech_end_silence_start and self.early_transcription_on_silence and len(self.frames) > 0 and \
                                    (time.time() - self.speech_end_silence_start > self.early_transcription_on_silence) and \
                                    self.allowed_to_early_transcribe:
                                if self.use_extended_logging:
                                    logging.debug("Debug: Adding early transcription request")
                                self.transcribe_count += 1
                                audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)
                                audio = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE

                                if self.use_extended_logging:
                                    logging.debug("Debug: early transcription request pipe send")
                                self.parent_transcription_pipe.send((audio, self.language))
                                if self.use_extended_logging:
                                    logging.debug("Debug: early transcription request pipe send return")
                                self.allowed_to_early_transcribe = False

                        else:
                            if self.use_extended_logging:
                                logging.debug('Debug: Handling speech detection')
                            if self.speech_end_silence_start:
                                if self.use_extended_logging:
                                    logging.info("Resetting self.speech_end_silence_start")
                                self.speech_end_silence_start = 0
                                self.allowed_to_early_transcribe = True

                        if self.use_extended_logging:
                            logging.debug('Debug: Checking if silence duration exceeds threshold')
                        # Wait for silence to stop recording after speech
                        if self.speech_end_silence_start and time.time() - \
                                self.speech_end_silence_start >= \
                                self.post_speech_silence_duration:

                            if self.use_extended_logging:
                                logging.debug('Debug: Formatting silence start time')
                            # Get time in desired format (HH:MM:SS.nnn)
                            silence_start_time = datetime.datetime.fromtimestamp(self.speech_end_silence_start).strftime('%H:%M:%S.%f')[:-3]

                            if self.use_extended_logging:
                                logging.debug('Debug: Calculating time difference')
                            # Calculate time difference
                            time_diff = time.time() - self.speech_end_silence_start

                            if self.use_extended_logging:
                                logging.debug('Debug: Logging voice deactivity detection')
                                logging.info(f"voice deactivity detected at {silence_start_time}, "
                                             f"time since silence start: {time_diff:.3f} seconds")

                                logging.debug('Debug: Appending data to frames and stopping recording')
                            self.frames.append(data)
                            self.stop()
                            if not self.is_recording:
                                if self.use_extended_logging:
                                    logging.debug('Debug: Resetting speech_end_silence_start')
                                self.speech_end_silence_start = 0
                            else:
                                if self.use_extended_logging:
                                    logging.debug('Debug: Setting failed_stop_attempt to True')
                                failed_stop_attempt = True

                if self.use_extended_logging:
                    logging.debug('Debug: Checking if recording stopped')
                if not self.is_recording and was_recording:
                    if self.use_extended_logging:
                        logging.debug('Debug: Resetting after stopping recording')
                    # Reset after stopping recording to ensure clean state
                    self.stop_recording_on_voice_deactivity = False

                if self.use_extended_logging:
                    logging.debug('Debug: Checking Silero time')
                if time.time() - self.silero_check_time > 0.1:
                    self.silero_check_time = 0

                if self.use_extended_logging:
                    logging.debug('Debug: Updating was_recording')
                was_recording = self.is_recording

                if self.use_extended_logging:
                    logging.debug('Debug: Checking if recording and not failed stop attempt')
                if self.is_recording and not failed_stop_attempt:
                    if self.use_extended_logging:
                        logging.debug('Debug: Appending data to frames')
                    self.frames.append(data)
                    self.new_frames.set()

                if self.use_extended_logging:
                    logging.debug('Debug: Checking if not recording or speech end silence start')
                if not self.is_recording or self.speech_end_silence_start:
                    if self.use_extended_logging:
                        logging.debug('Debug: Appending data to audio buffer')
                    self.audio_buffer.append(data)

        except Exception as e:
            logging.debug('Debug: Caught exception in main try block')
            if not self.interrupt_stop_event.is_set():
                logging.error(f"Unhandled exception in _recording_worker: {e}")
                raise

        if self.use_extended_logging:
            logging.debug('Debug: Exiting _recording_worker method')
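
    # Illustrative configuration sketch (parameter values assumed, names
    # matching the attributes read above): how silence handling plays out in
    # the worker loop. With these settings, a pause first triggers an early
    # transcription request after 0.2 s of silence, and the recording then
    # stops once the full 0.7 s of post-speech silence has elapsed.
    #
    #     recorder = AudioToTextRecorder(
    #         post_speech_silence_duration=0.7,
    #         early_transcription_on_silence=0.2,
    #         min_length_of_recording=0.5,
    #     )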

    def _realtime_worker(self):
        """
        Performs real-time transcription if the feature is enabled.

        The method is responsible for transcribing recorded audio frames
        in real-time based on the specified resolution interval.
        The transcribed text is stored in `self.realtime_transcription_text`
        and a callback function is invoked with this text if specified.
        """
        try:
            logging.debug('Starting realtime worker')

            # Return immediately if real-time transcription is not enabled
            if not self.enable_realtime_transcription:
                return

            # Continue running as long as the main process is active
            while self.is_running:

                # Check if the recording is active
                if self.is_recording:
                    self.new_frames.wait()
                    self.new_frames.clear()

                    # Sleep for the duration of the transcription resolution
                    time.sleep(self.realtime_processing_pause)

                    # Convert the buffer frames to a NumPy array
                    audio_array = np.frombuffer(
                        b''.join(self.frames),
                        dtype=np.int16
                        )

                    logging.debug(f"Current realtime buffer size: {len(audio_array)}")

                    # Normalize the array to a [-1, 1] range
                    audio_array = audio_array.astype(np.float32) / \
                        INT16_MAX_ABS_VALUE

                    if self.use_main_model_for_realtime:
                        with self.transcription_lock:
                            try:
                                self.parent_transcription_pipe.send((audio_array, self.language))
                                if self.parent_transcription_pipe.poll(timeout=5):  # Wait for 5 seconds
                                    logging.debug("Receive from realtime worker after transcription request to main model")
                                    status, result = self.parent_transcription_pipe.recv()
                                    if status == 'success':
                                        segments, info = result
                                        self.detected_realtime_language = info.language if info.language_probability > 0 else None
                                        self.detected_realtime_language_probability = info.language_probability
                                        realtime_text = segments
                                        logging.debug(f"Realtime text detected with main model: {realtime_text}")
                                    else:
                                        logging.error(f"Realtime transcription error: {result}")
                                        continue
                                else:
                                    logging.warning("Realtime transcription timed out")
                                    continue
                            except Exception as e:
                                logging.error(f"Error in realtime transcription: {str(e)}")
                                continue
                    else:
                        # Perform transcription and assemble the text
                        segments, info = self.realtime_model_type.transcribe(
                            audio_array,
                            language=self.language if self.language else None,
                            beam_size=self.beam_size_realtime,
                            initial_prompt=self.initial_prompt,
                            suppress_tokens=self.suppress_tokens,
                        )

                        self.detected_realtime_language = info.language if info.language_probability > 0 else None
                        self.detected_realtime_language_probability = info.language_probability
                        realtime_text = " ".join(
                            seg.text for seg in segments
                        )
                        logging.debug(f"Realtime text detected: {realtime_text}")

                    # double check recording state
                    # because it could have changed mid-transcription
                    if self.is_recording and time.time() - \
                            self.recording_start_time > self.init_realtime_after_seconds:

                        self.realtime_transcription_text = realtime_text
                        self.realtime_transcription_text = \
                            self.realtime_transcription_text.strip()

                        self.text_storage.append(
                            self.realtime_transcription_text
                            )

                        # Take the last two texts in storage, if they exist
                        if len(self.text_storage) >= 2:
                            last_two_texts = self.text_storage[-2:]

                            # Find the longest common prefix
                            # between the two texts
                            prefix = os.path.commonprefix(
                                [last_two_texts[0], last_two_texts[1]]
                                )

                            # This prefix is the text that was transcribed
                            # the same way twice in a row, so we store it
                            # as "safely detected text"
                            if len(prefix) >= \
                                    len(self.realtime_stabilized_safetext):

                                # Only store when longer than the previous
                                # as additional security
                                self.realtime_stabilized_safetext = prefix

                        # Find parts of the stabilized text
                        # in the freshly transcribed text
                        matching_pos = self._find_tail_match_in_text(
                            self.realtime_stabilized_safetext,
                            self.realtime_transcription_text
                            )

                        if matching_pos < 0:
                            if self.realtime_stabilized_safetext:
                                self._on_realtime_transcription_stabilized(
                                    self._preprocess_output(
                                        self.realtime_stabilized_safetext,
                                        True
                                        )
                                    )
                            else:
                                self._on_realtime_transcription_stabilized(
                                    self._preprocess_output(
                                        self.realtime_transcription_text,
                                        True
                                        )
                                    )
                        else:
                            # We found parts of the stabilized text
                            # in the transcribed text.
                            # We now take the stabilized text and add
                            # only the freshly transcribed part to it.
                            output_text = self.realtime_stabilized_safetext + \
                                self.realtime_transcription_text[matching_pos:]

                            # This yields us the "left" text part as stabilized
                            # AND at the same time delivers fresh detected
                            # parts on the first run without the need for
                            # two transcriptions
                            self._on_realtime_transcription_stabilized(
                                self._preprocess_output(output_text, True)
                                )

                        # Invoke the callback with the transcribed text
                        self._on_realtime_transcription_update(
                            self._preprocess_output(
                                self.realtime_transcription_text,
                                True
                                )
                            )

                # If not recording, sleep briefly before checking again
                else:
                    time.sleep(TIME_SLEEP)

        except Exception as e:
            logging.error(f"Unhandled exception in _realtime_worker: {e}")
            raise
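
    # Illustrative realtime sketch (constructor arguments assumed, matching
    # the callbacks dispatched above): render live partial text and its
    # stabilized prefix differently.
    #
    #     recorder = AudioToTextRecorder(
    #         enable_realtime_transcription=True,
    #         on_realtime_transcription_update=lambda t: print("live:  ", t),
    #         on_realtime_transcription_stabilized=lambda t: print("stable:", t),
    #     )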

    def _is_silero_speech(self, chunk):
        """
        Returns true if speech is detected in the provided audio data.

        Args:
            chunk (bytes): raw bytes of audio data (1024 raw bytes with
              16000 sample rate and 16 bits per sample)
        """
        if self.sample_rate != 16000:
            pcm_data = np.frombuffer(chunk, dtype=np.int16)
            data_16000 = signal.resample_poly(
                pcm_data, 16000, self.sample_rate)
            chunk = data_16000.astype(np.int16).tobytes()

        self.silero_working = True
        audio_chunk = np.frombuffer(chunk, dtype=np.int16)
        audio_chunk = audio_chunk.astype(np.float32) / INT16_MAX_ABS_VALUE
        vad_prob = self.silero_vad_model(
            torch.from_numpy(audio_chunk),
            SAMPLE_RATE).item()
        is_silero_speech_active = vad_prob > (1 - self.silero_sensitivity)
        if is_silero_speech_active:
            if not self.is_silero_speech_active and self.use_extended_logging:
                logging.info(f"{bcolors.OKGREEN}Silero VAD detected speech{bcolors.ENDC}")
        elif self.is_silero_speech_active and self.use_extended_logging:
            logging.info(f"{bcolors.WARNING}Silero VAD detected silence{bcolors.ENDC}")
        self.is_silero_speech_active = is_silero_speech_active
        self.silero_working = False
        return is_silero_speech_active
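
    # Worked example (value assumed): with silero_sensitivity=0.6 the check
    # above becomes vad_prob > 1 - 0.6 = 0.4, so any Silero probability above
    # 0.4 counts as speech; raising the sensitivity lowers the threshold.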

    def _is_webrtc_speech(self, chunk, all_frames_must_be_true=False):
        """
        Returns true if speech is detected in the provided audio data.

        Args:
            chunk (bytes): raw bytes of audio data (1024 raw bytes with
              16000 sample rate and 16 bits per sample)
        """
        speech_str = f"{bcolors.OKGREEN}WebRTC VAD detected speech{bcolors.ENDC}"
        silence_str = f"{bcolors.WARNING}WebRTC VAD detected silence{bcolors.ENDC}"
        if self.sample_rate != 16000:
            pcm_data = np.frombuffer(chunk, dtype=np.int16)
            data_16000 = signal.resample_poly(
                pcm_data, 16000, self.sample_rate)
            chunk = data_16000.astype(np.int16).tobytes()

        # Number of audio samples per 10 ms frame at 16 kHz
        frame_length = int(16000 * 0.01)
        num_frames = int(len(chunk) / (2 * frame_length))
        speech_frames = 0

        for i in range(num_frames):
            start_byte = i * frame_length * 2
            end_byte = start_byte + frame_length * 2
            frame = chunk[start_byte:end_byte]
            if self.webrtc_vad_model.is_speech(frame, 16000):
                speech_frames += 1
                if not all_frames_must_be_true:
                    if self.debug_mode:
                        logging.info(f"Speech detected in frame {i + 1}"
                                     f" of {num_frames}")
                    if not self.is_webrtc_speech_active and self.use_extended_logging:
                        logging.info(speech_str)
                    self.is_webrtc_speech_active = True
                    return True
        if all_frames_must_be_true:
            if self.debug_mode and speech_frames == num_frames:
                logging.info(f"Speech detected in {speech_frames} of "
                             f"{num_frames} frames")
            elif self.debug_mode:
                logging.info(f"Speech not detected in all {num_frames} frames")
            speech_detected = speech_frames == num_frames
            if speech_detected and not self.is_webrtc_speech_active and self.use_extended_logging:
                logging.info(speech_str)
            elif not speech_detected and self.is_webrtc_speech_active and self.use_extended_logging:
                logging.info(silence_str)
            self.is_webrtc_speech_active = speech_detected
            return speech_detected
        else:
            if self.debug_mode:
                logging.info(f"Speech not detected in any of {num_frames} frames")
            if self.is_webrtc_speech_active and self.use_extended_logging:
                logging.info(silence_str)
            self.is_webrtc_speech_active = False
            return False
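
    # Worked example: a 1024-byte chunk holds 512 int16 samples, i.e. 32 ms at
    # 16 kHz. With 160 samples (320 bytes) per 10 ms WebRTC frame this gives
    # num_frames = 1024 // 320 = 3 full frames; the trailing 2 ms remainder
    # is never evaluated.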

    def _check_voice_activity(self, data):
        """
        Initiate check if voice is active based on the provided data.

        Args:
            data: The audio data to be checked for voice activity.
        """
        # First, a quick check for voice activity using WebRTC
        self._is_webrtc_speech(data)

        # If WebRTC detects voice, confirm with the more expensive Silero check
        if self.is_webrtc_speech_active:
            if not self.silero_working:
                self.silero_working = True

                # Run the intensive check in a separate thread
                threading.Thread(
                    target=self._is_silero_speech,
                    args=(data,)).start()

    def clear_audio_queue(self):
        """
        Safely empties the audio queue to ensure no remaining audio
        fragments get processed e.g. after waking up the recorder.
        """
        self.audio_buffer.clear()
        try:
            self.text_storage = []
            self.realtime_stabilized_text = ""
            self.realtime_stabilized_safetext = ""
            self.frames = []
            while True:
                self.audio_queue.get_nowait()
        except Exception:
            # PyTorch's mp.Queue doesn't have a specific Empty exception,
            # so we catch any exception that might occur when the queue
            # is empty.
            pass

    def _is_voice_active(self):
        """
        Determine if voice is active.

        Returns:
            bool: True if voice is active, False otherwise.
        """
        return self.is_webrtc_speech_active and self.is_silero_speech_active

    def _set_state(self, new_state):
        """
        Update the current state of the recorder and execute
        corresponding state-change callbacks.

        Args:
            new_state (str): The new state to set.
        """
        # Check if the state has actually changed
        if new_state == self.state:
            return

        # Store the current state for later comparison
        old_state = self.state

        # Update to the new state
        self.state = new_state

        # Log the state change
        logging.info(f"State changed from '{old_state}' to '{new_state}'")

        # Execute callbacks based on transitioning FROM a particular state
        if old_state == "listening":
            if self.on_vad_detect_stop:
                self.on_vad_detect_stop()

        # Execute callbacks based on transitioning TO a particular state
        if new_state == "listening":
            if self.on_vad_detect_start:
                self.on_vad_detect_start()
            self._set_spinner("speak now")
            if self.spinner and self.halo:
                self.halo._interval = 250
        elif new_state == "transcribing":
            if self.on_transcription_start:
                self.on_transcription_start()
            self._set_spinner("transcribing")
            if self.spinner and self.halo:
                self.halo._interval = 50
        elif new_state == "recording":
            self._set_spinner("recording")
            if self.spinner and self.halo:
                self.halo._interval = 100
        elif new_state == "inactive":
            if self.spinner and self.halo:
                self.halo.stop()
                self.halo = None

    def _set_spinner(self, text):
        """
        Update the spinner's text or create a new
        spinner with the provided text.

        Args:
            text (str): The text to be displayed alongside the spinner.
        """
        if self.spinner:
            # If the Halo spinner doesn't exist, create and start it
            if self.halo is None:
                self.halo = halo.Halo(text=text)
                self.halo.start()
            # If the Halo spinner already exists, just update the text
            else:
                self.halo.text = text

    def _preprocess_output(self, text, preview=False):
        """
        Preprocesses the output text by removing any leading or trailing
        whitespace, converting all whitespace sequences to a single space
        character, and capitalizing the first character of the text.

        Args:
            text (str): The text to be preprocessed.

        Returns:
            str: The preprocessed text.
        """
        text = re.sub(r'\s+', ' ', text.strip())

        if self.ensure_sentence_starting_uppercase:
            if text:
                text = text[0].upper() + text[1:]

        # Ensure the text ends with a proper punctuation
        # if it ends with an alphanumeric character
        if not preview:
            if self.ensure_sentence_ends_with_period:
                if text and text[-1].isalnum():
                    text += '.'

        return text
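
    # Worked example (assuming both ensure_* flags are True and preview is
    # False): "  hello   world " -> whitespace collapses to "hello world",
    # the first character is uppercased to "Hello world", and a period is
    # appended because the text ends alphanumerically: "Hello world."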

    def _find_tail_match_in_text(self, text1, text2, length_of_match=10):
        """
        Find the position in text2 just past an occurrence of the last
        'n' characters of text1.

        This method takes two texts, extracts the last 'n' characters from
        text1 (where 'n' is determined by the variable 'length_of_match'),
        and searches for an occurrence of this substring in text2, starting
        from the end of text2 and moving towards the beginning.

        Parameters:
        - text1 (str): The text containing the substring that we want to
          find in text2.
        - text2 (str): The text in which we want to find the matching
          substring.
        - length_of_match (int): The length of the matching string that we
          are looking for.

        Returns:
            int: The position (0-based index) in text2 directly after the
            matching substring ends. If no match is found or either of the
            texts is too short, returns -1.
        """
        # Check if either of the texts is too short
        if len(text1) < length_of_match or len(text2) < length_of_match:
            return -1

        # The end portion of the first text that we want to compare
        target_substring = text1[-length_of_match:]

        # Loop through text2 from right to left
        for i in range(len(text2) - length_of_match + 1):
            # Extract the substring from text2
            # to compare with the target_substring
            current_substring = text2[len(text2) - i - length_of_match:
                                      len(text2) - i]

            # Compare the current_substring with the target_substring
            if current_substring == target_substring:
                # Position in text2 directly after the match
                return len(text2) - i

        return -1
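
    # Worked example: with text1 = "the quick brown fox" and
    # text2 = "quick brown fox jumps", the 10-character tail of text1 is
    # " brown fox". It occurs in text2 ending at index 15, so the method
    # returns 15 and text2[15:] == " jumps" is the freshly transcribed part.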

    def _on_realtime_transcription_stabilized(self, text):
        """
        Callback method invoked when the real-time transcription stabilizes.

        This method is called internally when the transcription text is
        considered "stable", meaning it's less likely to change significantly
        with additional audio input. It notifies any registered external
        listener about the stabilized text if recording is still ongoing.
        This is particularly useful for applications that need to display
        live transcription results to users and want to highlight parts of
        the transcription that are less likely to change.

        Args:
            text (str): The stabilized transcription text.
        """
        if self.on_realtime_transcription_stabilized:
            if self.is_recording:
                self.on_realtime_transcription_stabilized(text)

    def _on_realtime_transcription_update(self, text):
        """
        Callback method invoked when there's an update in the real-time
        transcription.

        This method is called internally whenever there's a change in the
        transcription text, notifying any registered external listener about
        the update if recording is still ongoing. This provides a mechanism
        for applications to receive and possibly display live transcription
        updates, which could be partial and still subject to change.

        Args:
            text (str): The updated transcription text.
        """
        if self.on_realtime_transcription_update:
            if self.is_recording:
                self.on_realtime_transcription_update(text)

    def __enter__(self):
        """
        Method to set up the context manager protocol.

        This enables the instance to be used in a `with` statement, ensuring
        proper resource management. When the `with` block is entered, this
        method is automatically called.

        Returns:
            self: The current instance of the class.
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Method to define behavior when the context manager protocol exits.

        This is called when exiting the `with` block and ensures that any
        necessary cleanup or resource release processes are executed, such as
        shutting down the system properly.

        Args:
            exc_type (Exception or None): The type of the exception that
              caused the context to be exited, if any.
            exc_value (Exception or None): The exception instance that caused
              the context to be exited, if any.
            traceback (Traceback or None): The traceback corresponding to the
              exception, if any.
        """
        self.shutdown()
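
    # Illustrative usage sketch (constructor arguments assumed): the context
    # manager protocol above guarantees shutdown() runs even if the body
    # raises.
    #
    #     if __name__ == '__main__':
    #         with AudioToTextRecorder(spinner=False) as recorder:
    #             while True:
    #                 print(recorder.text())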