  1. """
  2. The AudioToTextRecorder class in the provided code facilitates
  3. fast speech-to-text transcription.
  4. The class employs the faster_whisper library to transcribe the recorded audio
  5. into text using machine learning models, which can be run either on a GPU or
  6. CPU. Voice activity detection (VAD) is built in, meaning the software can
  7. automatically start or stop recording based on the presence or absence of
  8. speech. It integrates wake word detection through the pvporcupine library,
  9. allowing the software to initiate recording when a specific word or phrase
  10. is spoken. The system provides real-time feedback and can be further
  11. customized.
  12. Features:
  13. - Voice Activity Detection: Automatically starts/stops recording when speech
  14. is detected or when speech ends.
  15. - Wake Word Detection: Starts recording when a specified wake word (or words)
  16. is detected.
  17. - Event Callbacks: Customizable callbacks for when recording starts
  18. or finishes.
  19. - Fast Transcription: Returns the transcribed text from the audio as fast
  20. as possible.
  21. Author: Kolja Beigel
  22. """
from typing import Iterable, List, Optional, Union
import torch.multiprocessing as mp
import torch
from ctypes import c_bool
from openwakeword.model import Model
from scipy.signal import resample
from scipy import signal
import signal as system_signal
import faster_whisper
import openwakeword
import collections
import numpy as np
import pvporcupine
import traceback
import threading
import webrtcvad
import itertools
import datetime
import platform
import pyaudio
import logging
import struct
import base64
import queue
import halo
import time
import copy
import os
import re
import gc

# Set OpenMP runtime duplicate library handling to OK (Use only for development!)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

INIT_MODEL_TRANSCRIPTION = "tiny"
INIT_MODEL_TRANSCRIPTION_REALTIME = "tiny"
INIT_REALTIME_PROCESSING_PAUSE = 0.2
INIT_REALTIME_INITIAL_PAUSE = 0.2
INIT_SILERO_SENSITIVITY = 0.4
INIT_WEBRTC_SENSITIVITY = 3
INIT_POST_SPEECH_SILENCE_DURATION = 0.6
INIT_MIN_LENGTH_OF_RECORDING = 0.5
INIT_MIN_GAP_BETWEEN_RECORDINGS = 0
INIT_WAKE_WORDS_SENSITIVITY = 0.6
INIT_PRE_RECORDING_BUFFER_DURATION = 1.0
INIT_WAKE_WORD_ACTIVATION_DELAY = 0.0
INIT_WAKE_WORD_TIMEOUT = 5.0
INIT_WAKE_WORD_BUFFER_DURATION = 0.1
ALLOWED_LATENCY_LIMIT = 100

TIME_SLEEP = 0.02
SAMPLE_RATE = 16000
BUFFER_SIZE = 512
INT16_MAX_ABS_VALUE = 32768.0

INIT_HANDLE_BUFFER_OVERFLOW = False
if platform.system() != 'Darwin':
    INIT_HANDLE_BUFFER_OVERFLOW = True


class TranscriptionWorker:
    def __init__(self, conn, stdout_pipe, model_path, compute_type, gpu_device_index, device,
                 ready_event, shutdown_event, interrupt_stop_event, beam_size, initial_prompt, suppress_tokens):
        self.conn = conn
        self.stdout_pipe = stdout_pipe
        self.model_path = model_path
        self.compute_type = compute_type
        self.gpu_device_index = gpu_device_index
        self.device = device
        self.ready_event = ready_event
        self.shutdown_event = shutdown_event
        self.interrupt_stop_event = interrupt_stop_event
        self.beam_size = beam_size
        self.initial_prompt = initial_prompt
        self.suppress_tokens = suppress_tokens
        self.queue = queue.Queue()

    def custom_print(self, *args, **kwargs):
        message = ' '.join(map(str, args))
        try:
            self.stdout_pipe.send(message)
        except (BrokenPipeError, EOFError, OSError):
            pass

    def poll_connection(self):
        while not self.shutdown_event.is_set():
            if self.conn.poll(0.01):
                try:
                    data = self.conn.recv()
                    self.queue.put(data)
                except Exception as e:
                    logging.error(f"Error receiving data from connection: {e}")
            else:
                time.sleep(TIME_SLEEP)

    def run(self):
        if __name__ == "__main__":
            system_signal.signal(system_signal.SIGINT, system_signal.SIG_IGN)
            __builtins__['print'] = self.custom_print

        logging.info(f"Initializing faster_whisper main transcription model {self.model_path}")

        try:
            model = faster_whisper.WhisperModel(
                model_size_or_path=self.model_path,
                device=self.device,
                compute_type=self.compute_type,
                device_index=self.gpu_device_index,
            )
        except Exception as e:
            logging.exception(f"Error initializing main faster_whisper transcription model: {e}")
            raise

        self.ready_event.set()
        logging.debug("Faster_whisper main speech to text transcription model initialized successfully")

        # Start the polling thread
        polling_thread = threading.Thread(target=self.poll_connection)
        polling_thread.start()

        try:
            while not self.shutdown_event.is_set():
                try:
                    audio, language = self.queue.get(timeout=0.1)
                    try:
                        segments, info = model.transcribe(
                            audio,
                            language=language if language else None,
                            beam_size=self.beam_size,
                            initial_prompt=self.initial_prompt,
                            suppress_tokens=self.suppress_tokens
                        )
                        transcription = " ".join(seg.text for seg in segments).strip()
                        logging.debug(f"Final text detected with main model: {transcription}")
                        self.conn.send(('success', (transcription, info)))
                    except Exception as e:
                        logging.error(f"General error in transcription: {e}")
                        self.conn.send(('error', str(e)))
                except queue.Empty:
                    continue
                except KeyboardInterrupt:
                    self.interrupt_stop_event.set()
                    logging.debug("Transcription worker process finished due to KeyboardInterrupt")
                    break
                except Exception as e:
                    logging.error(f"General error in processing queue item: {e}")
        finally:
            __builtins__['print'] = print  # Restore the original print function
            self.conn.close()
            self.stdout_pipe.close()
            self.shutdown_event.set()  # Ensure the polling thread will stop
            polling_thread.join()  # Wait for the polling thread to finish


class bcolors:
    OKGREEN = '\033[92m'  # Green for active speech detection
    WARNING = '\033[93m'  # Yellow for silence detection
    ENDC = '\033[0m'      # Reset to default color


class AudioToTextRecorder:
    """
    A class responsible for capturing audio from the microphone, detecting
    voice activity, and then transcribing the captured audio using the
    `faster_whisper` model.
    """
    def __init__(self,
                 model: str = INIT_MODEL_TRANSCRIPTION,
                 language: str = "",
                 compute_type: str = "default",
                 input_device_index: int = None,
                 gpu_device_index: Union[int, List[int]] = 0,
                 device: str = "cuda",
                 on_recording_start=None,
                 on_recording_stop=None,
                 on_transcription_start=None,
                 ensure_sentence_starting_uppercase=True,
                 ensure_sentence_ends_with_period=True,
                 use_microphone=True,
                 spinner=True,
                 level=logging.WARNING,
                 init_logging=True,

                 # Realtime transcription parameters
                 enable_realtime_transcription=False,
                 use_main_model_for_realtime=False,
                 realtime_model_type=INIT_MODEL_TRANSCRIPTION_REALTIME,
                 realtime_processing_pause=INIT_REALTIME_PROCESSING_PAUSE,
                 init_realtime_after_seconds=INIT_REALTIME_INITIAL_PAUSE,
                 on_realtime_transcription_update=None,
                 on_realtime_transcription_stabilized=None,

                 # Voice activation parameters
                 silero_sensitivity: float = INIT_SILERO_SENSITIVITY,
                 silero_use_onnx: bool = False,
                 silero_deactivity_detection: bool = False,
                 webrtc_sensitivity: int = INIT_WEBRTC_SENSITIVITY,
                 post_speech_silence_duration: float = (
                     INIT_POST_SPEECH_SILENCE_DURATION
                 ),
                 min_length_of_recording: float = (
                     INIT_MIN_LENGTH_OF_RECORDING
                 ),
                 min_gap_between_recordings: float = (
                     INIT_MIN_GAP_BETWEEN_RECORDINGS
                 ),
                 pre_recording_buffer_duration: float = (
                     INIT_PRE_RECORDING_BUFFER_DURATION
                 ),
                 on_vad_detect_start=None,
                 on_vad_detect_stop=None,

                 # Wake word parameters
                 wakeword_backend: str = "pvporcupine",
                 openwakeword_model_paths: str = None,
                 openwakeword_inference_framework: str = "onnx",
                 wake_words: str = "",
                 wake_words_sensitivity: float = INIT_WAKE_WORDS_SENSITIVITY,
                 wake_word_activation_delay: float = (
                     INIT_WAKE_WORD_ACTIVATION_DELAY
                 ),
                 wake_word_timeout: float = INIT_WAKE_WORD_TIMEOUT,
                 wake_word_buffer_duration: float = INIT_WAKE_WORD_BUFFER_DURATION,
                 on_wakeword_detected=None,
                 on_wakeword_timeout=None,
                 on_wakeword_detection_start=None,
                 on_wakeword_detection_end=None,
                 on_recorded_chunk=None,
                 debug_mode=False,
                 handle_buffer_overflow: bool = INIT_HANDLE_BUFFER_OVERFLOW,
                 beam_size: int = 5,
                 beam_size_realtime: int = 3,
                 buffer_size: int = BUFFER_SIZE,
                 sample_rate: int = SAMPLE_RATE,
                 initial_prompt: Optional[Union[str, Iterable[int]]] = None,
                 suppress_tokens: Optional[List[int]] = [-1],
                 print_transcription_time: bool = False,
                 early_transcription_on_silence: int = 0,
                 allowed_latency_limit: int = ALLOWED_LATENCY_LIMIT,
                 no_log_file: bool = False,
                 use_extended_logging: bool = False,
                 ):
        """
        Initializes an audio recorder with transcription and wake word
        detection.

        Args:
        - model (str, default="tiny"): Specifies the size of the transcription
            model to use or the path to a converted model directory.
            Valid options are 'tiny', 'tiny.en', 'base', 'base.en',
            'small', 'small.en', 'medium', 'medium.en', 'large-v1',
            'large-v2'.
            If a specific size is provided, the model is downloaded
            from the Hugging Face Hub.
        - language (str, default=""): Language code for the speech-to-text
            engine. If not specified, the model will attempt to detect the
            language automatically.
        - compute_type (str, default="default"): Specifies the type of
            computation to be used for transcription.
            See https://opennmt.net/CTranslate2/quantization.html.
        - input_device_index (int, default=None): The index of the audio input
            device to use.
        - gpu_device_index (int or list of int, default=0): Device ID to use.
            The model can also be loaded on multiple GPUs by passing a list of
            IDs (e.g. [0, 1, 2, 3]). In that case, multiple transcriptions can
            run in parallel when transcribe() is called from multiple Python
            threads.
        - device (str, default="cuda"): Device for the model to use. Can
            either be "cuda" or "cpu".
        - on_recording_start (callable, default=None): Callback function to be
            called when recording of audio to be transcribed starts.
        - on_recording_stop (callable, default=None): Callback function to be
            called when recording of audio to be transcribed stops.
        - on_transcription_start (callable, default=None): Callback function
            to be called when transcription of audio to text starts.
        - ensure_sentence_starting_uppercase (bool, default=True): Ensures
            that every sentence detected by the algorithm starts with an
            uppercase letter.
        - ensure_sentence_ends_with_period (bool, default=True): Ensures that
            every sentence that doesn't end with punctuation such as "?", "!"
            ends with a period.
        - use_microphone (bool, default=True): Specifies whether to use the
            microphone as the audio input source. If set to False, the
            audio input source will be the audio data sent through the
            feed_audio() method.
        - spinner (bool, default=True): Show a spinner animation with the
            current state.
        - level (int, default=logging.WARNING): Logging level.
        - init_logging (bool, default=True): Whether to initialize
            the logging framework. Set to False to manage this yourself.
        - enable_realtime_transcription (bool, default=False): Enables or
            disables real-time transcription of audio. When set to True, the
            audio will be transcribed continuously as it is being recorded.
        - use_main_model_for_realtime (bool, default=False):
            If True, use the main transcription model for both regular and
            real-time transcription. If False, use a separate model specified
            by realtime_model_type for real-time transcription.
            Using a single model can save memory and potentially improve
            performance, but may not be optimized for real-time processing.
            Using separate models allows for a smaller, faster model for
            real-time transcription while keeping a more accurate model for
            final transcription.
        - realtime_model_type (str, default="tiny"): Specifies the machine
            learning model to be used for real-time transcription. Valid
            options include 'tiny', 'tiny.en', 'base', 'base.en', 'small',
            'small.en', 'medium', 'medium.en', 'large-v1', 'large-v2'.
        - realtime_processing_pause (float, default=0.2): Specifies the time
            interval in seconds after a chunk of audio gets transcribed. Lower
            values will result in more "real-time" (frequent) transcription
            updates but may increase computational load.
        - init_realtime_after_seconds (float, default=0.2): Specifies the
            initial waiting time after the recording was initiated before
            yielding the first realtime transcription.
        - on_realtime_transcription_update (callable, default=None): A
            callback function that is triggered whenever there's an update in
            the real-time transcription. The function is called with the
            newly transcribed text as its argument.
        - on_realtime_transcription_stabilized (callable, default=None): A
            callback function that is triggered when the transcribed text
            stabilizes in quality. The stabilized text is generally more
            accurate but may arrive with a slight delay compared to the
            regular real-time updates.
        - silero_sensitivity (float, default=0.4): Sensitivity for the Silero
            Voice Activity Detection model ranging from 0 (least sensitive)
            to 1 (most sensitive).
        - silero_use_onnx (bool, default=False): Enables usage of the
            pre-trained model from Silero in the ONNX (Open Neural Network
            Exchange) format instead of the PyTorch format. This is
            recommended for faster performance.
        - silero_deactivity_detection (bool, default=False): Enables the Silero
            model for end-of-speech detection. More robust against background
            noise. Utilizes additional GPU resources but improves accuracy in
            noisy environments. When False, uses the default WebRTC VAD,
            which is more sensitive but may continue recording longer due
            to background sounds.
        - webrtc_sensitivity (int, default=3): Sensitivity for the WebRTC
            Voice Activity Detection engine ranging from 0 (least aggressive /
            most sensitive) to 3 (most aggressive, least sensitive).
        - post_speech_silence_duration (float, default=0.6): Duration in
            seconds of silence that must follow speech before the recording
            is considered to be completed. This ensures that any brief
            pauses during speech don't prematurely end the recording.
        - min_gap_between_recordings (float, default=0): Specifies the
            minimum time interval in seconds that should exist between the
            end of one recording session and the beginning of another to
            prevent rapid consecutive recordings.
        - min_length_of_recording (float, default=0.5): Specifies the minimum
            duration in seconds that a recording session should last to ensure
            meaningful audio capture, preventing excessively short or
            fragmented recordings.
        - pre_recording_buffer_duration (float, default=1.0): Duration in
            seconds for the audio buffer to maintain pre-roll audio
            (compensates for speech activity detection latency).
        - on_vad_detect_start (callable, default=None): Callback function to
            be called when the system listens for voice activity.
        - on_vad_detect_stop (callable, default=None): Callback function to be
            called when the system stops listening for voice activity.
        - wakeword_backend (str, default="pvporcupine"): Specifies the backend
            library to use for wake word detection. Supported options include
            'pvporcupine' for using the Porcupine wake word engine or 'oww' for
            using the OpenWakeWord engine.
        - openwakeword_model_paths (str, default=None): Comma-separated paths
            to model files for the openwakeword library. These paths point to
            custom models that can be used for wake word detection when the
            openwakeword library is selected as the wakeword_backend.
        - openwakeword_inference_framework (str, default="onnx"): Specifies
            the inference framework to use with the openwakeword library.
            Can be either 'onnx' for Open Neural Network Exchange format
            or 'tflite' for TensorFlow Lite.
        - wake_words (str, default=""): Comma-separated string of wake words to
            initiate recording when using the 'pvporcupine' wakeword backend.
            Supported wake words include: 'alexa', 'americano', 'blueberry',
            'bumblebee', 'computer', 'grapefruits', 'grasshopper', 'hey google',
            'hey siri', 'jarvis', 'ok google', 'picovoice', 'porcupine',
            'terminator'. For the 'openwakeword' backend, wake words are
            automatically extracted from the provided model files, so specifying
            them here is not necessary.
        - wake_words_sensitivity (float, default=0.6): Sensitivity for wake
            word detection, ranging from 0 (least sensitive) to 1 (most
            sensitive).
        - wake_word_activation_delay (float, default=0): Duration in seconds
            after the start of monitoring before the system switches to wake
            word activation if no voice is initially detected. If set to
            zero, the system uses wake word activation immediately.
        - wake_word_timeout (float, default=5): Duration in seconds after a
            wake word is recognized. If no subsequent voice activity is
            detected within this window, the system transitions back to an
            inactive state, awaiting the next wake word or voice activation.
        - wake_word_buffer_duration (float, default=0.1): Duration in seconds
            to buffer audio data during wake word detection. This helps in
            cutting out the wake word from the recording buffer so it does not
            falsely get detected along with the following spoken text, ensuring
            cleaner and more accurate transcription start triggers.
            Increase this if parts of the wake word get detected as text.
        - on_wakeword_detected (callable, default=None): Callback function to
            be called when a wake word is detected.
        - on_wakeword_timeout (callable, default=None): Callback function to
            be called when the system goes back to an inactive state because
            no speech was detected after wake word activation.
        - on_wakeword_detection_start (callable, default=None): Callback
            function to be called when the system starts listening for wake
            words.
        - on_wakeword_detection_end (callable, default=None): Callback
            function to be called when the system stops listening for
            wake words (e.g. because of a timeout or a detected wake word).
        - on_recorded_chunk (callable, default=None): Callback function to be
            called when a chunk of audio is recorded. The function is called
            with the recorded audio chunk as its argument.
        - debug_mode (bool, default=False): If set to True, the system will
            print additional debug information to the console.
        - handle_buffer_overflow (bool, default=True on non-macOS systems):
            If set to True, the system will log a warning when an input
            overflow occurs during recording and remove the data from the
            buffer.
        - beam_size (int, default=5): The beam size to use for beam search
            decoding.
        - beam_size_realtime (int, default=3): The beam size to use for beam
            search decoding in the real-time transcription model.
        - buffer_size (int, default=512): The buffer size to use for audio
            recording. Changing this may break functionality.
        - sample_rate (int, default=16000): The sample rate to use for audio
            recording. Changing this will very probably break functionality
            (as the WebRTC VAD model is very sensitive to the sample rate).
        - initial_prompt (str or iterable of int, default=None): Initial
            prompt to be fed to the transcription models.
        - suppress_tokens (list of int, default=[-1]): Tokens to be suppressed
            from the transcription output.
        - print_transcription_time (bool, default=False): Logs the processing
            time of the main model transcription.
        - early_transcription_on_silence (int, default=0): If set, the
            system will transcribe audio faster when silence is detected.
            Transcription will start after the specified milliseconds, so
            keep this value lower than post_speech_silence_duration.
            Ideally around post_speech_silence_duration minus the estimated
            transcription time with the main model.
            If silence lasts longer than post_speech_silence_duration, the
            recording is stopped, and the transcription is submitted. If
            voice activity resumes within this period, the transcription
            is discarded. Results in faster final transcriptions at the cost
            of additional GPU load due to some unnecessary final transcriptions.
        - allowed_latency_limit (int, default=100): Maximum number of
            unprocessed chunks allowed in the queue before chunks start
            being discarded.
        - no_log_file (bool, default=False): Skips writing of the debug log
            file.
        - use_extended_logging (bool, default=False): Writes extensive
            log messages for the recording worker that processes the audio
            chunks.

        Raises:
            Exception: Errors related to initializing the transcription
            model, wake word detection, or audio recording.
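
        Example (illustrative values only; every parameter shown is taken
        from the signature above):

            recorder = AudioToTextRecorder(
                model="base.en",
                language="en",
                enable_realtime_transcription=True,
                on_realtime_transcription_update=print,
            )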
  449. """
  450. self.language = language
  451. self.compute_type = compute_type
  452. self.input_device_index = input_device_index
  453. self.gpu_device_index = gpu_device_index
  454. self.device = device
  455. self.wake_words = wake_words
  456. self.wake_word_activation_delay = wake_word_activation_delay
  457. self.wake_word_timeout = wake_word_timeout
  458. self.wake_word_buffer_duration = wake_word_buffer_duration
  459. self.ensure_sentence_starting_uppercase = (
  460. ensure_sentence_starting_uppercase
  461. )
  462. self.ensure_sentence_ends_with_period = (
  463. ensure_sentence_ends_with_period
  464. )
  465. self.use_microphone = mp.Value(c_bool, use_microphone)
  466. self.min_gap_between_recordings = min_gap_between_recordings
  467. self.min_length_of_recording = min_length_of_recording
  468. self.pre_recording_buffer_duration = pre_recording_buffer_duration
  469. self.post_speech_silence_duration = post_speech_silence_duration
  470. self.on_recording_start = on_recording_start
  471. self.on_recording_stop = on_recording_stop
  472. self.on_wakeword_detected = on_wakeword_detected
  473. self.on_wakeword_timeout = on_wakeword_timeout
  474. self.on_vad_detect_start = on_vad_detect_start
  475. self.on_vad_detect_stop = on_vad_detect_stop
  476. self.on_wakeword_detection_start = on_wakeword_detection_start
  477. self.on_wakeword_detection_end = on_wakeword_detection_end
  478. self.on_recorded_chunk = on_recorded_chunk
  479. self.on_transcription_start = on_transcription_start
  480. self.enable_realtime_transcription = enable_realtime_transcription
  481. self.use_main_model_for_realtime = use_main_model_for_realtime
  482. self.main_model_type = model
  483. self.realtime_model_type = realtime_model_type
  484. self.realtime_processing_pause = realtime_processing_pause
  485. self.init_realtime_after_seconds = init_realtime_after_seconds
  486. self.on_realtime_transcription_update = (
  487. on_realtime_transcription_update
  488. )
  489. self.on_realtime_transcription_stabilized = (
  490. on_realtime_transcription_stabilized
  491. )
  492. self.debug_mode = debug_mode
  493. self.handle_buffer_overflow = handle_buffer_overflow
  494. self.beam_size = beam_size
  495. self.beam_size_realtime = beam_size_realtime
  496. self.allowed_latency_limit = allowed_latency_limit
  497. self.level = level
  498. self.audio_queue = mp.Queue()
  499. self.buffer_size = buffer_size
  500. self.sample_rate = sample_rate
  501. self.recording_start_time = 0
  502. self.recording_stop_time = 0
  503. self.wake_word_detect_time = 0
  504. self.silero_check_time = 0
  505. self.silero_working = False
  506. self.speech_end_silence_start = 0
  507. self.silero_sensitivity = silero_sensitivity
  508. self.silero_deactivity_detection = silero_deactivity_detection
  509. self.listen_start = 0
  510. self.spinner = spinner
  511. self.halo = None
  512. self.state = "inactive"
  513. self.wakeword_detected = False
  514. self.text_storage = []
  515. self.realtime_stabilized_text = ""
  516. self.realtime_stabilized_safetext = ""
  517. self.is_webrtc_speech_active = False
  518. self.is_silero_speech_active = False
  519. self.recording_thread = None
  520. self.realtime_thread = None
  521. self.audio_interface = None
  522. self.audio = None
  523. self.stream = None
  524. self.start_recording_event = threading.Event()
  525. self.stop_recording_event = threading.Event()
  526. self.last_transcription_bytes = None
  527. self.last_transcription_bytes_b64 = None
  528. self.initial_prompt = initial_prompt
  529. self.suppress_tokens = suppress_tokens
  530. self.use_wake_words = wake_words or wakeword_backend in {'oww', 'openwakeword', 'openwakewords'}
  531. self.detected_language = None
  532. self.detected_language_probability = 0
  533. self.detected_realtime_language = None
  534. self.detected_realtime_language_probability = 0
  535. self.transcription_lock = threading.Lock()
  536. self.shutdown_lock = threading.Lock()
  537. self.transcribe_count = 0
  538. self.print_transcription_time = print_transcription_time
  539. self.early_transcription_on_silence = early_transcription_on_silence
  540. self.use_extended_logging = use_extended_logging
  541. if init_logging:
  542. # Initialize the logging configuration with the specified level
  543. log_format = 'RealTimeSTT: %(name)s - %(levelname)s - %(message)s'
  544. # Adjust file_log_format to include milliseconds
  545. file_log_format = '%(asctime)s.%(msecs)03d - ' + log_format
  546. # Get the root logger
  547. logger = logging.getLogger()
  548. logger.setLevel(logging.DEBUG) # Set the root logger's level to DEBUG
  549. # Remove any existing handlers
  550. logger.handlers = []
  551. # Create a console handler and set its level
  552. console_handler = logging.StreamHandler()
  553. console_handler.setLevel(level)
  554. console_handler.setFormatter(logging.Formatter(log_format))
  555. # Add the handlers to the logger
  556. if not no_log_file:
  557. # Create a file handler and set its level
  558. file_handler = logging.FileHandler('realtimesst.log')
  559. file_handler.setLevel(logging.DEBUG)
  560. file_handler.setFormatter(logging.Formatter(
  561. file_log_format,
  562. datefmt='%Y-%m-%d %H:%M:%S'
  563. ))
  564. logger.addHandler(file_handler)
  565. logger.addHandler(console_handler)
  566. self.is_shut_down = False
  567. self.shutdown_event = mp.Event()
  568. try:
  569. # Only set the start method if it hasn't been set already
  570. if mp.get_start_method(allow_none=True) is None:
  571. mp.set_start_method("spawn")
  572. except RuntimeError as e:
  573. logging.info(f"Start method has already been set. Details: {e}")
  574. logging.info("Starting RealTimeSTT")
  575. if use_extended_logging:
  576. logging.info("RealtimeSTT was called with these parameters:")
  577. for param, value in locals().items():
  578. logging.info(f"{param}: {value}")
  579. self.interrupt_stop_event = mp.Event()
  580. self.was_interrupted = mp.Event()
  581. self.main_transcription_ready_event = mp.Event()
  582. self.parent_transcription_pipe, child_transcription_pipe = mp.Pipe()
  583. self.parent_stdout_pipe, child_stdout_pipe = mp.Pipe()
  584. # Set device for model
  585. self.device = "cuda" if self.device == "cuda" and torch.cuda.is_available() else "cpu"
  586. self.transcript_process = self._start_thread(
  587. target=AudioToTextRecorder._transcription_worker,
  588. args=(
  589. child_transcription_pipe,
  590. child_stdout_pipe,
  591. model,
  592. self.compute_type,
  593. self.gpu_device_index,
  594. self.device,
  595. self.main_transcription_ready_event,
  596. self.shutdown_event,
  597. self.interrupt_stop_event,
  598. self.beam_size,
  599. self.initial_prompt,
  600. self.suppress_tokens
  601. )
  602. )
  603. # Start audio data reading process
  604. if self.use_microphone.value:
  605. logging.info("Initializing audio recording"
  606. " (creating pyAudio input stream,"
  607. f" sample rate: {self.sample_rate}"
  608. f" buffer size: {self.buffer_size}"
  609. )
  610. self.reader_process = self._start_thread(
  611. target=AudioToTextRecorder._audio_data_worker,
  612. args=(
  613. self.audio_queue,
  614. self.sample_rate,
  615. self.buffer_size,
  616. self.input_device_index,
  617. self.shutdown_event,
  618. self.interrupt_stop_event,
  619. self.use_microphone
  620. )
  621. )
  622. # Initialize the realtime transcription model
  623. if self.enable_realtime_transcription and not self.use_main_model_for_realtime:
  624. try:
  625. logging.info("Initializing faster_whisper realtime "
  626. f"transcription model {self.realtime_model_type}"
  627. )
  628. self.realtime_model_type = faster_whisper.WhisperModel(
  629. model_size_or_path=self.realtime_model_type,
  630. device=self.device,
  631. compute_type=self.compute_type,
  632. device_index=self.gpu_device_index
  633. )
  634. except Exception as e:
  635. logging.exception("Error initializing faster_whisper "
  636. f"realtime transcription model: {e}"
  637. )
  638. raise
  639. logging.debug("Faster_whisper realtime speech to text "
  640. "transcription model initialized successfully")
  641. # Setup wake word detection
  642. if wake_words or wakeword_backend in {'oww', 'openwakeword', 'openwakewords'}:
  643. self.wakeword_backend = wakeword_backend
  644. self.wake_words_list = [
  645. word.strip() for word in wake_words.lower().split(',')
  646. ]
  647. self.wake_words_sensitivity = wake_words_sensitivity
  648. self.wake_words_sensitivities = [
  649. float(wake_words_sensitivity)
  650. for _ in range(len(self.wake_words_list))
  651. ]
  652. if self.wakeword_backend in {'pvp', 'pvporcupine'}:
  653. try:
  654. self.porcupine = pvporcupine.create(
  655. keywords=self.wake_words_list,
  656. sensitivities=self.wake_words_sensitivities
  657. )
  658. self.buffer_size = self.porcupine.frame_length
  659. self.sample_rate = self.porcupine.sample_rate
  660. except Exception as e:
  661. logging.exception(
  662. "Error initializing porcupine "
  663. f"wake word detection engine: {e}"
  664. )
  665. raise
  666. logging.debug(
  667. "Porcupine wake word detection engine initialized successfully"
  668. )
  669. elif self.wakeword_backend in {'oww', 'openwakeword', 'openwakewords'}:
  670. openwakeword.utils.download_models()
  671. try:
  672. if openwakeword_model_paths:
  673. model_paths = openwakeword_model_paths.split(',')
  674. self.owwModel = Model(
  675. wakeword_models=model_paths,
  676. inference_framework=openwakeword_inference_framework
  677. )
  678. logging.info(
  679. "Successfully loaded wakeword model(s): "
  680. f"{openwakeword_model_paths}"
  681. )
  682. else:
  683. self.owwModel = Model(
  684. inference_framework=openwakeword_inference_framework)
  685. self.oww_n_models = len(self.owwModel.models.keys())
  686. if not self.oww_n_models:
  687. logging.error(
  688. "No wake word models loaded."
  689. )
  690. for model_key in self.owwModel.models.keys():
  691. logging.info(
  692. "Successfully loaded openwakeword model: "
  693. f"{model_key}"
  694. )
  695. except Exception as e:
  696. logging.exception(
  697. "Error initializing openwakeword "
  698. f"wake word detection engine: {e}"
  699. )
  700. raise
  701. logging.debug(
  702. "Open wake word detection engine initialized successfully"
  703. )
  704. else:
  705. logging.exception(f"Wakeword engine {self.wakeword_backend} unknown/unsupported. Please specify one of: pvporcupine, openwakeword.")
  706. # Setup voice activity detection model WebRTC
  707. try:
  708. logging.info("Initializing WebRTC voice with "
  709. f"Sensitivity {webrtc_sensitivity}"
  710. )
  711. self.webrtc_vad_model = webrtcvad.Vad()
  712. self.webrtc_vad_model.set_mode(webrtc_sensitivity)
  713. except Exception as e:
  714. logging.exception("Error initializing WebRTC voice "
  715. f"activity detection engine: {e}"
  716. )
  717. raise
  718. logging.debug("WebRTC VAD voice activity detection "
  719. "engine initialized successfully"
  720. )
  721. # Setup voice activity detection model Silero VAD
  722. try:
  723. self.silero_vad_model, _ = torch.hub.load(
  724. repo_or_dir="snakers4/silero-vad",
  725. model="silero_vad",
  726. verbose=False,
  727. onnx=silero_use_onnx
  728. )
  729. except Exception as e:
  730. logging.exception(f"Error initializing Silero VAD "
  731. f"voice activity detection engine: {e}"
  732. )
  733. raise
  734. logging.debug("Silero VAD voice activity detection "
  735. "engine initialized successfully"
  736. )
  737. self.audio_buffer = collections.deque(
  738. maxlen=int((self.sample_rate // self.buffer_size) *
  739. self.pre_recording_buffer_duration)
  740. )
  741. self.last_words_buffer = collections.deque(
  742. maxlen=int((self.sample_rate // self.buffer_size) *
  743. 0.3)
  744. )
  745. self.frames = []
  746. # Recording control flags
  747. self.is_recording = False
  748. self.is_running = True
  749. self.start_recording_on_voice_activity = False
  750. self.stop_recording_on_voice_deactivity = False
  751. # Start the recording worker thread
  752. self.recording_thread = threading.Thread(target=self._recording_worker)
  753. self.recording_thread.daemon = True
  754. self.recording_thread.start()
  755. # Start the realtime transcription worker thread
  756. self.realtime_thread = threading.Thread(target=self._realtime_worker)
  757. self.realtime_thread.daemon = True
  758. self.realtime_thread.start()
  759. # Wait for transcription models to start
  760. logging.debug('Waiting for main transcription model to start')
  761. self.main_transcription_ready_event.wait()
  762. logging.debug('Main transcription model ready')
  763. self.stdout_thread = threading.Thread(target=self._read_stdout)
  764. self.stdout_thread.daemon = True
  765. self.stdout_thread.start()
  766. logging.debug('RealtimeSTT initialization completed successfully')
    def _start_thread(self, target=None, args=()):
        """
        Implement a consistent threading model across the library.

        This method is used to start any thread in this library. It uses the
        standard threading.Thread on Linux and, for all other platforms, the
        PyTorch multiprocessing 'Process'.

        Args:
            target (callable object): is the callable object to be invoked by
              the run() method. Defaults to None, meaning nothing is called.
            args (tuple): is a list or tuple of arguments for the target
              invocation. Defaults to ().
        """
        if (platform.system() == 'Linux'):
            thread = threading.Thread(target=target, args=args)
            thread.daemon = True
            thread.start()
            return thread
        else:
            thread = mp.Process(target=target, args=args)
            thread.start()
            return thread

    def _read_stdout(self):
        while not self.shutdown_event.is_set():
            try:
                if self.parent_stdout_pipe.poll(0.1):
                    logging.debug("Receive from stdout pipe")
                    message = self.parent_stdout_pipe.recv()
                    logging.info(message)
            except (BrokenPipeError, EOFError, OSError):
                # The pipe probably has been closed, so we ignore the error
                pass
            except KeyboardInterrupt:  # handle manual interruption (Ctrl+C)
                logging.info("KeyboardInterrupt in read from stdout detected, exiting...")
                break
            except Exception as e:
                logging.error(f"Unexpected error in read from stdout: {e}")
                logging.error(traceback.format_exc())  # Log the full traceback here
                break
            time.sleep(0.1)

    @staticmethod
    def _transcription_worker(*args, **kwargs):
        worker = TranscriptionWorker(*args, **kwargs)
        worker.run()
    @staticmethod
    def _audio_data_worker(audio_queue,
                           target_sample_rate,
                           buffer_size,
                           input_device_index,
                           shutdown_event,
                           interrupt_stop_event,
                           use_microphone):
        """
        Worker method that handles the audio recording process.

        This method runs in a separate process and is responsible for:
        - Setting up the audio input stream for recording at the highest possible sample rate.
        - Continuously reading audio data from the input stream, resampling if necessary,
          preprocessing the data, and placing complete chunks in a queue.
        - Handling errors during the recording process.
        - Gracefully terminating the recording process when a shutdown event is set.

        Args:
            audio_queue (queue.Queue): A queue where recorded audio data is placed.
            target_sample_rate (int): The desired sample rate for the output audio (for Silero VAD).
            buffer_size (int): The number of samples expected by the Silero VAD model.
            input_device_index (int): The index of the audio input device.
            shutdown_event (threading.Event): An event that, when set, signals this worker method to terminate.
            interrupt_stop_event (threading.Event): An event to signal keyboard interrupt.
            use_microphone (multiprocessing.Value): A shared value indicating whether to use the microphone.

        Raises:
            Exception: If there is an error while initializing the audio recording.
        """
        import pyaudio
        import numpy as np
        from scipy import signal

        if __name__ == '__main__':
            system_signal.signal(system_signal.SIGINT, system_signal.SIG_IGN)

        def get_highest_sample_rate(audio_interface, device_index):
            """Get the highest supported sample rate for the specified device."""
            try:
                device_info = audio_interface.get_device_info_by_index(device_index)
                max_rate = int(device_info['defaultSampleRate'])

                if 'supportedSampleRates' in device_info:
                    supported_rates = [int(rate) for rate in device_info['supportedSampleRates']]
                    if supported_rates:
                        max_rate = max(supported_rates)

                return max_rate
            except Exception as e:
                logging.warning(f"Failed to get highest sample rate: {e}")
                return 48000  # Fallback to a common high sample rate

        def initialize_audio_stream(audio_interface, sample_rate, chunk_size):
            """Initialize the audio stream with error handling."""
            nonlocal input_device_index

            def validate_device(device_index):
                """Validate that the device exists and is actually available for input."""
                try:
                    device_info = audio_interface.get_device_info_by_index(device_index)
                    if not device_info.get('maxInputChannels', 0) > 0:
                        return False

                    # Try to actually read from the device
                    test_stream = audio_interface.open(
                        format=pyaudio.paInt16,
                        channels=1,
                        rate=target_sample_rate,
                        input=True,
                        frames_per_buffer=chunk_size,
                        input_device_index=device_index,
                        start=False  # Don't start the stream yet
                    )

                    # Start the stream and try to read from it
                    test_stream.start_stream()
                    test_data = test_stream.read(chunk_size, exception_on_overflow=False)
                    test_stream.stop_stream()
                    test_stream.close()

                    # Check if we got valid data
                    if len(test_data) == 0:
                        return False

                    return True
                except Exception as e:
                    logging.debug(f"Device validation failed: {e}")
                    return False

            while not shutdown_event.is_set():
                try:
                    # First, get a list of all available input devices
                    input_devices = []
                    for i in range(audio_interface.get_device_count()):
                        try:
                            device_info = audio_interface.get_device_info_by_index(i)
                            if device_info.get('maxInputChannels', 0) > 0:
                                input_devices.append(i)
                        except Exception:
                            continue

                    if not input_devices:
                        raise Exception("No input devices found")

                    # If input_device_index is None or invalid, try to find a working device
                    if input_device_index is None or input_device_index not in input_devices:
                        # First try the default device
                        try:
                            default_device = audio_interface.get_default_input_device_info()
                            if validate_device(default_device['index']):
                                input_device_index = default_device['index']
                        except Exception:
                            # If default device fails, try other available input devices
                            for device_index in input_devices:
                                if validate_device(device_index):
                                    input_device_index = device_index
                                    break
                            else:
                                raise Exception("No working input devices found")

                    # Validate the selected device one final time
                    if not validate_device(input_device_index):
                        raise Exception("Selected device validation failed")

                    # If we get here, we have a validated device
                    stream = audio_interface.open(
                        format=pyaudio.paInt16,
                        channels=1,
                        rate=sample_rate,
                        input=True,
                        frames_per_buffer=chunk_size,
                        input_device_index=input_device_index,
                    )

                    logging.info(f"Microphone connected and validated (input_device_index: {input_device_index})")
                    return stream

                except Exception as e:
                    logging.error(f"Microphone connection failed: {e}. Retrying...")
                    input_device_index = None
                    time.sleep(3)  # Wait before retrying
                    continue

        def preprocess_audio(chunk, original_sample_rate, target_sample_rate):
            """Preprocess audio chunk similar to feed_audio method."""
            if isinstance(chunk, np.ndarray):
                # Handle stereo to mono conversion if necessary
                if chunk.ndim == 2:
                    chunk = np.mean(chunk, axis=1)

                # Resample to target_sample_rate if necessary
                if original_sample_rate != target_sample_rate:
                    num_samples = int(len(chunk) * target_sample_rate / original_sample_rate)
                    chunk = signal.resample(chunk, num_samples)

                # Ensure data type is int16
                chunk = chunk.astype(np.int16)
            else:
                # If chunk is bytes, convert to numpy array
                chunk = np.frombuffer(chunk, dtype=np.int16)

                # Resample if necessary
                if original_sample_rate != target_sample_rate:
                    num_samples = int(len(chunk) * target_sample_rate / original_sample_rate)
                    chunk = signal.resample(chunk, num_samples)
                    chunk = chunk.astype(np.int16)

            return chunk.tobytes()

        audio_interface = None
        stream = None
        device_sample_rate = None
        chunk_size = 1024  # Increased chunk size for better performance

        def setup_audio():
            nonlocal audio_interface, stream, device_sample_rate, input_device_index
            try:
                if audio_interface is None:
                    audio_interface = pyaudio.PyAudio()

                if input_device_index is None:
                    try:
                        default_device = audio_interface.get_default_input_device_info()
                        input_device_index = default_device['index']
                    except OSError as e:
                        input_device_index = None

                sample_rates_to_try = [16000]  # Try 16000 Hz first
                if input_device_index is not None:
                    highest_rate = get_highest_sample_rate(audio_interface, input_device_index)
                    if highest_rate != 16000:
                        sample_rates_to_try.append(highest_rate)
                else:
                    sample_rates_to_try.append(48000)  # Fallback sample rate

                for rate in sample_rates_to_try:
                    try:
                        device_sample_rate = rate
                        stream = initialize_audio_stream(audio_interface, device_sample_rate, chunk_size)
                        if stream is not None:
                            logging.debug(f"Audio recording initialized successfully at {device_sample_rate} Hz, reading {chunk_size} frames at a time")
                            return True
                    except Exception as e:
                        logging.warning(f"Failed to initialize audio stream at {device_sample_rate} Hz: {e}")
                        continue

                # If we reach here, none of the sample rates worked
                raise Exception("Failed to initialize audio stream with all sample rates.")
            except Exception as e:
                logging.exception(f"Error initializing pyaudio audio recording: {e}")
                if audio_interface:
                    audio_interface.terminate()
                return False

        if not setup_audio():
            raise Exception("Failed to set up audio recording.")

        buffer = bytearray()
        silero_buffer_size = 2 * buffer_size  # silero complains if too short

        time_since_last_buffer_message = 0

        try:
            while not shutdown_event.is_set():
                try:
                    data = stream.read(chunk_size, exception_on_overflow=False)

                    if use_microphone.value:
                        processed_data = preprocess_audio(data, device_sample_rate, target_sample_rate)
                        buffer += processed_data

                        # Check if the buffer has reached or exceeded the silero_buffer_size
                        while len(buffer) >= silero_buffer_size:
                            # Extract silero_buffer_size amount of data from the buffer
                            to_process = buffer[:silero_buffer_size]
                            buffer = buffer[silero_buffer_size:]

                            # Feed the extracted data to the audio_queue
                            if time_since_last_buffer_message:
                                time_passed = time.time() - time_since_last_buffer_message
                                if time_passed > 1:
                                    logging.debug("_audio_data_worker writing audio data into queue.")
                                    time_since_last_buffer_message = time.time()
                            else:
                                time_since_last_buffer_message = time.time()

                            audio_queue.put(to_process)

                except OSError as e:
                    if e.errno == pyaudio.paInputOverflowed:
                        logging.warning("Input overflowed. Frame dropped.")
                    else:
                        logging.error(f"OSError during recording: {e}")
                        # Attempt to reinitialize the stream
                        logging.error("Attempting to reinitialize the audio stream...")

                        try:
                            if stream:
                                stream.stop_stream()
                                stream.close()
                        except Exception:
                            pass

                        # Wait a bit before trying to reinitialize
                        time.sleep(1)

                        if not setup_audio():
                            logging.error("Failed to reinitialize audio stream. Exiting.")
                            break
                        else:
                            logging.error("Audio stream reinitialized successfully.")
                    continue

                except Exception as e:
                    logging.error(f"Unknown error during recording: {e}")
                    tb_str = traceback.format_exc()
                    logging.error(f"Traceback: {tb_str}")
                    logging.error(f"Error: {e}")
                    # Attempt to reinitialize the stream
                    logging.info("Attempting to reinitialize the audio stream...")

                    try:
                        if stream:
                            stream.stop_stream()
                            stream.close()
                    except Exception:
                        pass

                    # Wait a bit before trying to reinitialize
                    time.sleep(1)

                    if not setup_audio():
                        logging.error("Failed to reinitialize audio stream. Exiting.")
                        break
                    else:
                        logging.info("Audio stream reinitialized successfully.")
                    continue

        except KeyboardInterrupt:
            interrupt_stop_event.set()
            logging.debug("Audio data worker process finished due to KeyboardInterrupt")
        finally:
            # After recording stops, feed any remaining audio data
            if buffer:
                audio_queue.put(bytes(buffer))

            try:
                if stream:
                    stream.stop_stream()
                    stream.close()
            except Exception:
                pass
            if audio_interface:
                audio_interface.terminate()
    def wakeup(self):
        """
        If in wake word mode, wake up as if a wake word was spoken.
        """
        self.listen_start = time.time()

    def abort(self):
        self.start_recording_on_voice_activity = False
        self.stop_recording_on_voice_deactivity = False
        self._set_state("inactive")
        self.interrupt_stop_event.set()
        self.was_interrupted.wait()
        self.was_interrupted.clear()
    def wait_audio(self):
        """
        Waits for the start and completion of the audio recording process.

        This method is responsible for:
        - Waiting for voice activity to begin recording if not yet started.
        - Waiting for voice inactivity to complete the recording.
        - Setting the audio buffer from the recorded frames.
        - Resetting recording-related attributes.

        Side effects:
        - Updates the state of the instance.
        - Modifies the audio attribute to contain the processed audio data.
        """
        try:
            logging.info("Setting listen time")
            if self.listen_start == 0:
                self.listen_start = time.time()

            # If not yet started recording, wait for voice activity to initiate.
            if not self.is_recording and not self.frames:
                self._set_state("listening")
                self.start_recording_on_voice_activity = True

                # Wait until recording starts
                logging.debug('Waiting for recording start')
                while not self.interrupt_stop_event.is_set():
                    if self.start_recording_event.wait(timeout=0.02):
                        break

            # If recording is ongoing, wait for voice inactivity
            # to finish recording.
            if self.is_recording:
                self.stop_recording_on_voice_deactivity = True

                # Wait until recording stops
                logging.debug('Waiting for recording stop')
                while not self.interrupt_stop_event.is_set():
                    if self.stop_recording_event.wait(timeout=0.02):
                        break

            # Convert recorded frames to the appropriate audio format.
            audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)
            self.audio = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE
            self.frames.clear()

            # Reset recording-related timestamps
            self.recording_stop_time = 0
            self.listen_start = 0

            self._set_state("inactive")

        except KeyboardInterrupt:
            logging.info("KeyboardInterrupt in wait_audio, shutting down")
            self.shutdown()
            raise  # Re-raise the exception after cleanup
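    # Worked example of the int16 -> float32 conversion above (assuming
    # INT16_MAX_ABS_VALUE is 32768, as the divisor's name suggests): a sample
    # value of 16384 maps to 16384 / 32768 = 0.5, so self.audio is always
    # normalized into the [-1.0, 1.0] range that float32 speech models expect.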
    def transcribe(self):
        """
        Transcribes audio captured by this class instance using the
        `faster_whisper` model.

        Automatically starts recording upon voice activity if not manually
        started using `recorder.start()`.

        Automatically stops recording upon voice deactivity if not manually
        stopped with `recorder.stop()`.

        Processes the recorded audio to generate transcription.

        Returns:
            str: The transcription of the recorded audio.

        Raises:
            Exception: If there is an error during the transcription process.
        """
        self._set_state("transcribing")
        audio_copy = copy.deepcopy(self.audio)
        start_time = 0
        with self.transcription_lock:
            try:
                if self.transcribe_count == 0:
                    logging.debug("Adding transcription request, no early transcription started")
                    start_time = time.time()  # Start timing
                    self.parent_transcription_pipe.send((audio_copy, self.language))
                    self.transcribe_count += 1

                while self.transcribe_count > 0:
                    logging.debug(f"Receive from parent_transcription_pipe after sending transcription request, transcribe_count: {self.transcribe_count}")
                    status, result = self.parent_transcription_pipe.recv()
                    self.transcribe_count -= 1

                self.allowed_to_early_transcribe = True
                self._set_state("inactive")
                if status == 'success':
                    segments, info = result
                    self.detected_language = info.language if info.language_probability > 0 else None
                    self.detected_language_probability = info.language_probability
                    self.last_transcription_bytes = copy.deepcopy(audio_copy)
                    self.last_transcription_bytes_b64 = base64.b64encode(self.last_transcription_bytes.tobytes()).decode('utf-8')
                    transcription = self._preprocess_output(segments)
                    end_time = time.time()  # End timing
                    transcription_time = end_time - start_time

                    if start_time:
                        if self.print_transcription_time:
                            print(f"Model {self.main_model_type} completed transcription in {transcription_time:.2f} seconds")
                        else:
                            logging.debug(f"Model {self.main_model_type} completed transcription in {transcription_time:.2f} seconds")
                    return transcription
                else:
                    logging.error(f"Transcription error: {result}")
                    raise Exception(result)
            except Exception as e:
                logging.error(f"Error during transcription: {str(e)}")
                raise e
    def _process_wakeword(self, data):
        """
        Processes audio data to detect wake words.
        """
        if self.wakeword_backend in {'pvp', 'pvporcupine'}:
            pcm = struct.unpack_from(
                "h" * self.buffer_size,
                data
            )
            porcupine_index = self.porcupine.process(pcm)
            if self.debug_mode:
                logging.info(f"wake words porcupine_index: {porcupine_index}")
            # Return the already-computed result instead of processing the
            # same frame a second time
            return porcupine_index

        elif self.wakeword_backend in {'oww', 'openwakeword', 'openwakewords'}:
            pcm = np.frombuffer(data, dtype=np.int16)
            # predict() fills the model's prediction_buffer as a side effect
            self.owwModel.predict(pcm)
            max_score = -1
            max_index = -1
            wake_words_in_prediction = len(self.owwModel.prediction_buffer.keys())
            if wake_words_in_prediction:
                for idx, mdl in enumerate(self.owwModel.prediction_buffer.keys()):
                    scores = list(self.owwModel.prediction_buffer[mdl])
                    if scores[-1] >= self.wake_words_sensitivity and scores[-1] > max_score:
                        max_score = scores[-1]
                        max_index = idx
                if self.debug_mode:
                    logging.info(f"wake words oww max_index, max_score: {max_index} {max_score}")
                return max_index
            else:
                if self.debug_mode:
                    logging.info("wake words oww_index: -1")
                return -1

        if self.debug_mode:
            logging.info("wake words no match")
        return -1
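    # Return-value contract used by _recording_worker: _process_wakeword
    # returns the index (>= 0) of the detected wake word, or -1 when nothing
    # was detected. For example, with two configured wake words, a return
    # value of 1 would mean the second one fired (the example is illustrative).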
    def text(self,
             on_transcription_finished=None,
             ):
        """
        Transcribes audio captured by this class instance
        using the `faster_whisper` model.

        - Automatically starts recording upon voice activity if not manually
          started using `recorder.start()`.
        - Automatically stops recording upon voice deactivity if not manually
          stopped with `recorder.stop()`.
        - Processes the recorded audio to generate transcription.

        Args:
            on_transcription_finished (callable, optional): Callback function
              to be executed when transcription is ready.
              If provided, transcription will be performed asynchronously, and
              the callback will receive the transcription as its argument.
              If omitted, the transcription will be performed synchronously,
              and the result will be returned.

        Returns (if no callback is set):
            str: The transcription of the recorded audio.
        """
        self.interrupt_stop_event.clear()
        self.was_interrupted.clear()
        try:
            self.wait_audio()
        except KeyboardInterrupt:
            logging.info("KeyboardInterrupt in text() method")
            self.shutdown()
            raise  # Re-raise the exception after cleanup

        if self.is_shut_down or self.interrupt_stop_event.is_set():
            if self.interrupt_stop_event.is_set():
                self.was_interrupted.set()
            return ""

        if on_transcription_finished:
            threading.Thread(target=on_transcription_finished,
                             args=(self.transcribe(),)).start()
        else:
            return self.transcribe()
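    # Illustrative usage sketch (assumes `recorder` is an instance of this
    # class; the variable name is not part of the API):
    #
    #     print(recorder.text())                      # synchronous: blocks until
    #                                                 # speech ends, returns text
    #     recorder.text(lambda t: print("got:", t))   # callback variant
    #
    # Note that in the callback variant the transcription itself still runs on
    # the calling thread (self.transcribe() is evaluated while building args);
    # only the callback invocation is deferred to the worker thread.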
    def start(self):
        """
        Starts recording audio directly without waiting for voice activity.
        """
        # Ensure there's a minimum interval
        # between stopping and starting recording
        if (time.time() - self.recording_stop_time
                < self.min_gap_between_recordings):
            logging.info("Attempted to start recording "
                         "too soon after stopping."
                         )
            return self

        logging.info("recording started")
        self._set_state("recording")
        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.wakeword_detected = False
        self.wake_word_detect_time = 0
        self.frames = []
        self.is_recording = True
        self.recording_start_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False
        self.stop_recording_event.clear()
        self.start_recording_event.set()

        if self.on_recording_start:
            self.on_recording_start()

        return self
    def stop(self):
        """
        Stops recording audio.
        """
        # Ensure there's a minimum interval
        # between starting and stopping recording
        if (time.time() - self.recording_start_time
                < self.min_length_of_recording):
            logging.info("Attempted to stop recording "
                         "too soon after starting."
                         )
            return self

        logging.info("recording stopped")
        self.is_recording = False
        self.recording_stop_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False
        self.silero_check_time = 0
        self.start_recording_event.clear()
        self.stop_recording_event.set()

        if self.on_recording_stop:
            self.on_recording_stop()

        return self

    def listen(self):
        """
        Puts recorder in immediate "listen" state.
        This is the state after a wake word detection, for example.
        The recorder now "listens" for voice activation.
        Once voice is detected we enter "recording" state.
        """
        self.listen_start = time.time()
        self._set_state("listening")
        self.start_recording_on_voice_activity = True
    def feed_audio(self, chunk, original_sample_rate=16000):
        """
        Feed an audio chunk into the processing pipeline. Chunks are
        accumulated until the buffer size is reached, and then the accumulated
        data is fed into the audio_queue.
        """
        # Check if the buffer attribute exists, if not, initialize it
        if not hasattr(self, 'buffer'):
            self.buffer = bytearray()

        # Check if input is a NumPy array
        if isinstance(chunk, np.ndarray):
            # Handle stereo to mono conversion if necessary
            if chunk.ndim == 2:
                chunk = np.mean(chunk, axis=1)

            # Resample to 16000 Hz if necessary
            if original_sample_rate != 16000:
                num_samples = int(len(chunk) * 16000 / original_sample_rate)
                chunk = resample(chunk, num_samples)

            # Ensure data type is int16
            chunk = chunk.astype(np.int16)

            # Convert the NumPy array to bytes
            chunk = chunk.tobytes()

        # Append the chunk to the buffer
        self.buffer += chunk
        buf_size = 2 * self.buffer_size  # Silero complains if chunks are too short

        # Check if the buffer has reached or exceeded the buffer size
        while len(self.buffer) >= buf_size:
            # Extract buf_size amount of data from the buffer
            to_process = self.buffer[:buf_size]
            self.buffer = self.buffer[buf_size:]

            # Feed the extracted data to the audio_queue
            self.audio_queue.put(to_process)
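    # Illustrative usage sketch (the 44.1 kHz stereo source is hypothetical):
    #
    #     stereo_chunk = np.zeros((4410, 2), dtype=np.int16)  # 100 ms of silence
    #     recorder.feed_audio(stereo_chunk, original_sample_rate=44100)
    #
    # The chunk is mixed down to mono, resampled to 16 kHz, converted to raw
    # int16 bytes, and then queued in slices of 2 * self.buffer_size bytes.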
    def set_microphone(self, microphone_on=True):
        """
        Set the microphone on or off.
        """
        logging.info("Setting microphone to: " + str(microphone_on))
        self.use_microphone.value = microphone_on

    def shutdown(self):
        """
        Safely shuts down the audio recording by stopping the
        recording worker and closing the audio stream.
        """
        with self.shutdown_lock:
            if self.is_shut_down:
                return

            print("\033[91mRealtimeSTT shutting down\033[0m")
            # logging.debug("RealtimeSTT shutting down")

            self.is_shut_down = True
            self.start_recording_event.set()
            self.stop_recording_event.set()

            self.shutdown_event.set()
            self.is_recording = False
            self.is_running = False

            logging.debug('Finishing recording thread')
            if self.recording_thread:
                self.audio_queue.put(bytes(1))
                self.recording_thread.join()

            logging.debug('Terminating reader process')
            # Give it some time to finish the loop and cleanup.
            if self.use_microphone.value:
                self.reader_process.join(timeout=10)

                if self.reader_process.is_alive():
                    logging.warning("Reader process did not terminate "
                                    "in time. Terminating forcefully."
                                    )
                    self.reader_process.terminate()

            logging.debug('Terminating transcription process')
            self.transcript_process.join(timeout=10)

            if self.transcript_process.is_alive():
                logging.warning("Transcript process did not terminate "
                                "in time. Terminating forcefully."
                                )
                self.transcript_process.terminate()

            self.parent_transcription_pipe.close()

            logging.debug('Finishing realtime thread')
            if self.realtime_thread:
                self.realtime_thread.join()

            if self.enable_realtime_transcription:
                if self.realtime_model_type:
                    del self.realtime_model_type
                    self.realtime_model_type = None

            gc.collect()
    def _recording_worker(self):
        """
        The main worker method which constantly monitors the audio
        input for voice activity and accordingly starts/stops the recording.
        """

        if self.use_extended_logging:
            logging.debug('Debug: Entering try block')

        last_inner_try_time = 0
        try:
            if self.use_extended_logging:
                logging.debug('Debug: Initializing variables')
            time_since_last_buffer_message = 0
            was_recording = False
            delay_was_passed = False
            wakeword_detected_time = None
            wakeword_samples_to_remove = None
            self.allowed_to_early_transcribe = True

            if self.use_extended_logging:
                logging.debug('Debug: Starting main loop')
            # Continuously monitor audio for voice activity
            while self.is_running:

                # if self.use_extended_logging:
                #     logging.debug('Debug: Entering inner try block')
                if last_inner_try_time:
                    last_processing_time = time.time() - last_inner_try_time
                    if last_processing_time > 0.1:
                        if self.use_extended_logging:
                            logging.warning('### WARNING: PROCESSING TOOK TOO LONG')
                last_inner_try_time = time.time()
                try:
                    # if self.use_extended_logging:
                    #     logging.debug('Debug: Trying to get data from audio queue')
                    try:
                        data = self.audio_queue.get(timeout=0.01)
                        self.last_words_buffer.append(data)
                    except queue.Empty:
                        # if self.use_extended_logging:
                        #     logging.debug('Debug: Queue is empty, checking if still running')
                        if not self.is_running:
                            if self.use_extended_logging:
                                logging.debug('Debug: Not running, breaking loop')
                            break
                        # if self.use_extended_logging:
                        #     logging.debug('Debug: Continuing to next iteration')
                        continue

                    if self.use_extended_logging:
                        logging.debug('Debug: Checking for on_recorded_chunk callback')
                    if self.on_recorded_chunk:
                        if self.use_extended_logging:
                            logging.debug('Debug: Calling on_recorded_chunk')
                        self.on_recorded_chunk(data)

                    if self.use_extended_logging:
                        logging.debug('Debug: Checking if handle_buffer_overflow is True')
                    if self.handle_buffer_overflow:
                        if self.use_extended_logging:
                            logging.debug('Debug: Handling buffer overflow')
                        # Handle queue overflow
                        if (self.audio_queue.qsize() >
                                self.allowed_latency_limit):
                            if self.use_extended_logging:
                                logging.debug('Debug: Queue size exceeds limit, logging warnings')
                            logging.warning("Audio queue size exceeds "
                                            "latency limit. Current size: "
                                            f"{self.audio_queue.qsize()}. "
                                            "Discarding old audio chunks."
                                            )

                        if self.use_extended_logging:
                            logging.debug('Debug: Discarding old chunks if necessary')
                        while (self.audio_queue.qsize() >
                                self.allowed_latency_limit):
                            data = self.audio_queue.get()

                except BrokenPipeError:
                    logging.error("BrokenPipeError _recording_worker")
                    self.is_running = False
                    break

                if self.use_extended_logging:
                    logging.debug('Debug: Updating time_since_last_buffer_message')
                # Log the processing heartbeat at most once per second
                if time_since_last_buffer_message:
                    time_passed = time.time() - time_since_last_buffer_message
                    if time_passed > 1:
                        if self.use_extended_logging:
                            logging.debug("_recording_worker processing audio data")
                        time_since_last_buffer_message = time.time()
                else:
                    time_since_last_buffer_message = time.time()

                if self.use_extended_logging:
                    logging.debug('Debug: Initializing failed_stop_attempt')
                failed_stop_attempt = False

                if self.use_extended_logging:
                    logging.debug('Debug: Checking if not recording')
                if not self.is_recording:
                    if self.use_extended_logging:
                        logging.debug('Debug: Handling not recording state')
                    # Handle not recording state
                    time_since_listen_start = (time.time() - self.listen_start
                                               if self.listen_start else 0)

                    wake_word_activation_delay_passed = (
                        time_since_listen_start >
                        self.wake_word_activation_delay
                    )

                    if self.use_extended_logging:
                        logging.debug('Debug: Handling wake-word timeout callback')
                    # Handle wake-word timeout callback
                    if wake_word_activation_delay_passed \
                            and not delay_was_passed:

                        if self.use_wake_words and self.wake_word_activation_delay:
                            if self.on_wakeword_timeout:
                                if self.use_extended_logging:
                                    logging.debug('Debug: Calling on_wakeword_timeout')
                                self.on_wakeword_timeout()
                    delay_was_passed = wake_word_activation_delay_passed

                    if self.use_extended_logging:
                        logging.debug('Debug: Setting state and spinner text')
                    # Set state and spinner text
                    if not self.recording_stop_time:
                        if self.use_wake_words \
                                and wake_word_activation_delay_passed \
                                and not self.wakeword_detected:
                            if self.use_extended_logging:
                                logging.debug('Debug: Setting state to "wakeword"')
                            self._set_state("wakeword")
                        else:
                            if self.listen_start:
                                if self.use_extended_logging:
                                    logging.debug('Debug: Setting state to "listening"')
                                self._set_state("listening")
                            else:
                                if self.use_extended_logging:
                                    logging.debug('Debug: Setting state to "inactive"')
                                self._set_state("inactive")

                    if self.use_extended_logging:
                        logging.debug('Debug: Checking wake word conditions')
                    if self.use_wake_words and wake_word_activation_delay_passed:
                        try:
                            if self.use_extended_logging:
                                logging.debug('Debug: Processing wakeword')
                            wakeword_index = self._process_wakeword(data)

                        except struct.error:
                            logging.error("Error unpacking audio data "
                                          "for wake word processing.")
                            continue

                        except Exception as e:
                            logging.error(f"Wake word processing error: {e}")
                            continue

                        if self.use_extended_logging:
                            logging.debug('Debug: Checking if wake word detected')
                        # If a wake word is detected
                        if wakeword_index >= 0:
                            if self.use_extended_logging:
                                logging.debug('Debug: Wake word detected, updating variables')
                            self.wake_word_detect_time = time.time()
                            wakeword_detected_time = time.time()
                            wakeword_samples_to_remove = int(self.sample_rate * self.wake_word_buffer_duration)
                            self.wakeword_detected = True
                            if self.on_wakeword_detected:
                                if self.use_extended_logging:
                                    logging.debug('Debug: Calling on_wakeword_detected')
                                self.on_wakeword_detected()

                    if self.use_extended_logging:
                        logging.debug('Debug: Checking voice activity conditions')
                    # Check for voice activity to
                    # trigger the start of recording
                    if ((not self.use_wake_words
                         or not wake_word_activation_delay_passed)
                            and self.start_recording_on_voice_activity) \
                            or self.wakeword_detected:

                        if self.use_extended_logging:
                            logging.debug('Debug: Checking if voice is active')
                        if self._is_voice_active():
                            if self.use_extended_logging:
                                logging.debug('Debug: Voice activity detected')
                            logging.info("voice activity detected")

                            if self.use_extended_logging:
                                logging.debug('Debug: Starting recording')
                            self.start()

                            self.start_recording_on_voice_activity = False

                            if self.use_extended_logging:
                                logging.debug('Debug: Adding buffered audio to frames')
                            # Add the buffered audio
                            # to the recording frames
                            self.frames.extend(list(self.audio_buffer))
                            self.audio_buffer.clear()

                            if self.use_extended_logging:
                                logging.debug('Debug: Resetting Silero VAD model states')
                            self.silero_vad_model.reset_states()
                        else:
                            if self.use_extended_logging:
                                logging.debug('Debug: Checking voice activity')
                            data_copy = data[:]
                            self._check_voice_activity(data_copy)

                    if self.use_extended_logging:
                        logging.debug('Debug: Resetting speech_end_silence_start')
                    self.speech_end_silence_start = 0

                else:
                    if self.use_extended_logging:
                        logging.debug('Debug: Handling recording state')
                    # If we are currently recording
                    if wakeword_samples_to_remove and wakeword_samples_to_remove > 0:
                        if self.use_extended_logging:
                            logging.debug('Debug: Removing wakeword samples')
                        # Remove samples from the beginning of self.frames
                        samples_removed = 0
                        while wakeword_samples_to_remove > 0 and self.frames:
                            frame = self.frames[0]
                            frame_samples = len(frame) // 2  # Assuming 16-bit audio
                            if wakeword_samples_to_remove >= frame_samples:
                                self.frames.pop(0)
                                samples_removed += frame_samples
                                wakeword_samples_to_remove -= frame_samples
                            else:
                                self.frames[0] = frame[wakeword_samples_to_remove * 2:]
                                samples_removed += wakeword_samples_to_remove
                                # The partial frame has been trimmed; nothing
                                # is left to remove, so end the loop
                                wakeword_samples_to_remove = 0

                        wakeword_samples_to_remove = 0
                    if self.use_extended_logging:
                        logging.debug('Debug: Checking if stop_recording_on_voice_deactivity is True')
                    # Stop the recording if silence is detected after speech
                    if self.stop_recording_on_voice_deactivity:
                        if self.use_extended_logging:
                            logging.debug('Debug: Determining if speech is detected')
                        is_speech = (
                            self._is_silero_speech(data) if self.silero_deactivity_detection
                            else self._is_webrtc_speech(data, True)
                        )

                        if self.use_extended_logging:
                            logging.debug('Debug: Formatting speech_end_silence_start')
                        if not self.speech_end_silence_start:
                            str_speech_end_silence_start = "0"
                        else:
                            str_speech_end_silence_start = datetime.datetime.fromtimestamp(self.speech_end_silence_start).strftime('%H:%M:%S.%f')[:-3]
                        if self.use_extended_logging:
                            logging.debug(f"is_speech: {is_speech}, str_speech_end_silence_start: {str_speech_end_silence_start}")

                        if self.use_extended_logging:
                            logging.debug('Debug: Checking if speech is not detected')
                        if not is_speech:
                            if self.use_extended_logging:
                                logging.debug('Debug: Handling voice deactivity')
                            # Voice deactivity was detected, so we start
                            # measuring silence time before stopping recording
                            if self.speech_end_silence_start == 0 and \
                                    (time.time() - self.recording_start_time > self.min_length_of_recording):
                                self.speech_end_silence_start = time.time()

                            if self.use_extended_logging:
                                logging.debug('Debug: Checking early transcription conditions')
                            if self.speech_end_silence_start and self.early_transcription_on_silence and len(self.frames) > 0 and \
                                    (time.time() - self.speech_end_silence_start > self.early_transcription_on_silence) and \
                                    self.allowed_to_early_transcribe:
                                if self.use_extended_logging:
                                    logging.debug("Debug: Adding early transcription request")
                                self.transcribe_count += 1
                                audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)
                                audio = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE

                                if self.use_extended_logging:
                                    logging.debug("Debug: early transcription request pipe send")
                                self.parent_transcription_pipe.send((audio, self.language))
                                if self.use_extended_logging:
                                    logging.debug("Debug: early transcription request pipe send return")
                                self.allowed_to_early_transcribe = False

                        else:
                            if self.use_extended_logging:
                                logging.debug('Debug: Handling speech detection')
                            if self.speech_end_silence_start:
                                if self.use_extended_logging:
                                    logging.info("Resetting self.speech_end_silence_start")
                                self.speech_end_silence_start = 0
                                self.allowed_to_early_transcribe = True

                        if self.use_extended_logging:
                            logging.debug('Debug: Checking if silence duration exceeds threshold')
                        # Wait for silence to stop recording after speech
                        if self.speech_end_silence_start and time.time() - \
                                self.speech_end_silence_start >= \
                                self.post_speech_silence_duration:

                            if self.use_extended_logging:
                                logging.debug('Debug: Formatting silence start time')
                            # Get time in desired format (HH:MM:SS.nnn)
                            silence_start_time = datetime.datetime.fromtimestamp(self.speech_end_silence_start).strftime('%H:%M:%S.%f')[:-3]

                            if self.use_extended_logging:
                                logging.debug('Debug: Calculating time difference')
                            # Calculate time difference
                            time_diff = time.time() - self.speech_end_silence_start

                            if self.use_extended_logging:
                                logging.debug('Debug: Logging voice deactivity detection')
                                logging.info(f"voice deactivity detected at {silence_start_time}, "
                                             f"time since silence start: {time_diff:.3f} seconds")

                                logging.debug('Debug: Appending data to frames and stopping recording')
                            self.frames.append(data)
                            self.stop()
                            if not self.is_recording:
                                if self.use_extended_logging:
                                    logging.debug('Debug: Resetting speech_end_silence_start')
                                self.speech_end_silence_start = 0

                                if self.use_extended_logging:
                                    logging.debug('Debug: Handling non-wake word scenario')
                            else:
                                if self.use_extended_logging:
                                    logging.debug('Debug: Setting failed_stop_attempt to True')
                                failed_stop_attempt = True

                    if self.use_extended_logging:
                        logging.debug('Debug: Checking if recording stopped')
                    if not self.is_recording and was_recording:
                        if self.use_extended_logging:
                            logging.debug('Debug: Resetting after stopping recording')
                        # Reset after stopping recording to ensure clean state
                        self.stop_recording_on_voice_deactivity = False

                if self.use_extended_logging:
                    logging.debug('Debug: Checking Silero time')
                if time.time() - self.silero_check_time > 0.1:
                    self.silero_check_time = 0

                if self.use_extended_logging:
                    logging.debug('Debug: Handling wake word timeout')
                # Handle wake word timeout (waited too long initiating
                # speech after wake word detection)
                if self.wake_word_detect_time and time.time() - \
                        self.wake_word_detect_time > self.wake_word_timeout:

                    self.wake_word_detect_time = 0
                    if self.wakeword_detected and self.on_wakeword_timeout:
                        if self.use_extended_logging:
                            logging.debug('Debug: Calling on_wakeword_timeout')
                        self.on_wakeword_timeout()
                    self.wakeword_detected = False

                if self.use_extended_logging:
                    logging.debug('Debug: Updating was_recording')
                was_recording = self.is_recording

                if self.use_extended_logging:
                    logging.debug('Debug: Checking if recording and not failed stop attempt')
                if self.is_recording and not failed_stop_attempt:
                    if self.use_extended_logging:
                        logging.debug('Debug: Appending data to frames')
                    self.frames.append(data)

                if self.use_extended_logging:
                    logging.debug('Debug: Checking if not recording or speech end silence start')
                if not self.is_recording or self.speech_end_silence_start:
                    if self.use_extended_logging:
                        logging.debug('Debug: Appending data to audio buffer')
                    self.audio_buffer.append(data)

        except Exception as e:
            logging.debug('Debug: Caught exception in main try block')
            if not self.interrupt_stop_event.is_set():
                logging.error(f"Unhandled exception in _recording_worker: {e}")
                raise

        if self.use_extended_logging:
            logging.debug('Debug: Exiting _recording_worker method')
    def _realtime_worker(self):
        """
        Performs real-time transcription if the feature is enabled.

        The method is responsible for transcribing recorded audio frames
        in real-time based on the specified resolution interval.
        The transcribed text is stored in `self.realtime_transcription_text`
        and a callback function is invoked with this text if specified.
        """
        try:
            logging.debug('Starting realtime worker')

            # Return immediately if real-time transcription is not enabled
            if not self.enable_realtime_transcription:
                return

            # Continue running as long as the main process is active
            while self.is_running:

                # Check if the recording is active
                if self.is_recording:

                    # Sleep for the duration of the transcription resolution
                    time.sleep(self.realtime_processing_pause)

                    # Convert the buffer frames to a NumPy array
                    audio_array = np.frombuffer(
                        b''.join(self.frames),
                        dtype=np.int16
                        )

                    logging.debug(f"Current realtime buffer size: {len(audio_array)}")

                    # Normalize the array to a [-1, 1] range
                    audio_array = audio_array.astype(np.float32) / \
                        INT16_MAX_ABS_VALUE

                    if self.use_main_model_for_realtime:
                        with self.transcription_lock:
                            try:
                                self.parent_transcription_pipe.send((audio_array, self.language))
                                if self.parent_transcription_pipe.poll(timeout=5):  # Wait for 5 seconds
                                    logging.debug("Receive from realtime worker after transcription request to main model")
                                    status, result = self.parent_transcription_pipe.recv()
                                    if status == 'success':
                                        segments, info = result
                                        self.detected_realtime_language = info.language if info.language_probability > 0 else None
                                        self.detected_realtime_language_probability = info.language_probability
                                        realtime_text = segments
                                        logging.debug(f"Realtime text detected with main model: {realtime_text}")
                                    else:
                                        logging.error(f"Realtime transcription error: {result}")
                                        continue
                                else:
                                    logging.warning("Realtime transcription timed out")
                                    continue
                            except Exception as e:
                                logging.error(f"Error in realtime transcription: {str(e)}")
                                continue
                    else:
                        # Perform transcription and assemble the text
                        segments, info = self.realtime_model_type.transcribe(
                            audio_array,
                            language=self.language if self.language else None,
                            beam_size=self.beam_size_realtime,
                            initial_prompt=self.initial_prompt,
                            suppress_tokens=self.suppress_tokens,
                        )

                        self.detected_realtime_language = info.language if info.language_probability > 0 else None
                        self.detected_realtime_language_probability = info.language_probability
                        realtime_text = " ".join(
                            seg.text for seg in segments
                        )
                        logging.debug(f"Realtime text detected: {realtime_text}")

                    # Double-check the recording state,
                    # because it could have changed mid-transcription
                    if self.is_recording and time.time() - \
                            self.recording_start_time > self.init_realtime_after_seconds:

                        self.realtime_transcription_text = realtime_text
                        self.realtime_transcription_text = \
                            self.realtime_transcription_text.strip()

                        self.text_storage.append(
                            self.realtime_transcription_text
                            )

                        # Take the last two texts in storage, if they exist
                        if len(self.text_storage) >= 2:
                            last_two_texts = self.text_storage[-2:]

                            # Find the longest common prefix
                            # between the two texts
                            prefix = os.path.commonprefix(
                                [last_two_texts[0], last_two_texts[1]]
                                )

                            # This prefix is the text that was transcribed
                            # two times in the same way.
                            # Store it as "safely detected text".
                            if len(prefix) >= \
                                    len(self.realtime_stabilized_safetext):

                                # Only store when longer than the previous
                                # as additional security
                                self.realtime_stabilized_safetext = prefix

                        # Find parts of the stabilized text
                        # in the freshly transcribed text
                        matching_pos = self._find_tail_match_in_text(
                            self.realtime_stabilized_safetext,
                            self.realtime_transcription_text
                            )

                        if matching_pos < 0:
                            if self.realtime_stabilized_safetext:
                                self._on_realtime_transcription_stabilized(
                                    self._preprocess_output(
                                        self.realtime_stabilized_safetext,
                                        True
                                    )
                                )
                            else:
                                self._on_realtime_transcription_stabilized(
                                    self._preprocess_output(
                                        self.realtime_transcription_text,
                                        True
                                    )
                                )
                        else:
                            # We found parts of the stabilized text
                            # in the transcribed text.
                            # We now take the stabilized text
                            # and add only the freshly transcribed part to it.
                            output_text = self.realtime_stabilized_safetext + \
                                self.realtime_transcription_text[matching_pos:]

                            # This yields the "left" text part as stabilized
                            # AND at the same time delivers freshly detected
                            # parts on the first run, without the need for
                            # two transcriptions
                            self._on_realtime_transcription_stabilized(
                                self._preprocess_output(output_text, True)
                                )

                        # Invoke the callback with the transcribed text
                        self._on_realtime_transcription_update(
                            self._preprocess_output(
                                self.realtime_transcription_text,
                                True
                            )
                        )

                # If not recording, sleep briefly before checking again
                else:
                    time.sleep(TIME_SLEEP)

        except Exception as e:
            logging.error(f"Unhandled exception in _realtime_worker: {e}")
            raise
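    # Worked example of the stabilization strategy above: if two consecutive
    # realtime transcripts are "hello wor" and "hello world", their common
    # prefix "hello wor" is treated as stable text, while the remainder is
    # still allowed to change on the next pass. The tail match then lets the
    # fresh suffix be appended to the stabilized part without waiting for a
    # second confirming transcription.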
    def _is_silero_speech(self, chunk):
        """
        Returns true if speech is detected in the provided audio data

        Args:
            chunk (bytes): raw bytes of audio data (1024 raw bytes with
            16000 sample rate and 16 bits per sample)
        """
        if self.sample_rate != 16000:
            pcm_data = np.frombuffer(chunk, dtype=np.int16)
            data_16000 = signal.resample_poly(
                pcm_data, 16000, self.sample_rate)
            chunk = data_16000.astype(np.int16).tobytes()

        self.silero_working = True
        audio_chunk = np.frombuffer(chunk, dtype=np.int16)
        audio_chunk = audio_chunk.astype(np.float32) / INT16_MAX_ABS_VALUE
        vad_prob = self.silero_vad_model(
            torch.from_numpy(audio_chunk),
            SAMPLE_RATE).item()
        is_silero_speech_active = vad_prob > (1 - self.silero_sensitivity)
        if is_silero_speech_active:
            if not self.is_silero_speech_active and self.use_extended_logging:
                logging.info(f"{bcolors.OKGREEN}Silero VAD detected speech{bcolors.ENDC}")
        elif self.is_silero_speech_active and self.use_extended_logging:
            logging.info(f"{bcolors.WARNING}Silero VAD detected silence{bcolors.ENDC}")
        self.is_silero_speech_active = is_silero_speech_active
        self.silero_working = False
        return is_silero_speech_active
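    # Note on the threshold above: silero_sensitivity is inverted into a VAD
    # probability cutoff, e.g. a sensitivity of 0.4 requires vad_prob > 0.6
    # before speech is declared, so raising the sensitivity lowers the bar.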
    def _is_webrtc_speech(self, chunk, all_frames_must_be_true=False):
        """
        Returns true if speech is detected in the provided audio data

        Args:
            chunk (bytes): raw bytes of audio data (1024 raw bytes with
            16000 sample rate and 16 bits per sample)
        """
        speech_str = f"{bcolors.OKGREEN}WebRTC VAD detected speech{bcolors.ENDC}"
        silence_str = f"{bcolors.WARNING}WebRTC VAD detected silence{bcolors.ENDC}"
        if self.sample_rate != 16000:
            pcm_data = np.frombuffer(chunk, dtype=np.int16)
            data_16000 = signal.resample_poly(
                pcm_data, 16000, self.sample_rate)
            chunk = data_16000.astype(np.int16).tobytes()

        # Number of audio samples per 10 ms frame at 16 kHz
        frame_length = int(16000 * 0.01)
        num_frames = int(len(chunk) / (2 * frame_length))
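        # Worked example: at 16 kHz a 10 ms frame is 160 samples, i.e. 320
        # bytes of int16 data, so a 1024-byte chunk yields 1024 // 320 = 3
        # whole frames (any trailing partial frame is ignored).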
        speech_frames = 0

        for i in range(num_frames):
            start_byte = i * frame_length * 2
            end_byte = start_byte + frame_length * 2
            frame = chunk[start_byte:end_byte]
            if self.webrtc_vad_model.is_speech(frame, 16000):
                speech_frames += 1
                if not all_frames_must_be_true:
                    if self.debug_mode:
                        logging.info(f"Speech detected in frame {i + 1}"
                                     f" of {num_frames}")
                    if not self.is_webrtc_speech_active and self.use_extended_logging:
                        logging.info(speech_str)
                    self.is_webrtc_speech_active = True
                    return True
        if all_frames_must_be_true:
            if self.debug_mode and speech_frames == num_frames:
                logging.info(f"Speech detected in {speech_frames} of "
                             f"{num_frames} frames")
            elif self.debug_mode:
                logging.info(f"Speech not detected in all {num_frames} frames")
            speech_detected = speech_frames == num_frames
            if speech_detected and not self.is_webrtc_speech_active and self.use_extended_logging:
                logging.info(speech_str)
            elif not speech_detected and self.is_webrtc_speech_active and self.use_extended_logging:
                logging.info(silence_str)
            self.is_webrtc_speech_active = speech_detected
            return speech_detected
        else:
            if self.debug_mode:
                logging.info(f"Speech not detected in any of {num_frames} frames")
            if self.is_webrtc_speech_active and self.use_extended_logging:
                logging.info(silence_str)
            self.is_webrtc_speech_active = False
            return False
    def _check_voice_activity(self, data):
        """
        Initiate check if voice is active based on the provided data.

        Args:
            data: The audio data to be checked for voice activity.
        """
        # First, a quick-performing check for voice activity using WebRTC
        self._is_webrtc_speech(data)

        if self.is_webrtc_speech_active:
            if not self.silero_working:
                self.silero_working = True

                # Run the intensive Silero check in a separate thread
                threading.Thread(
                    target=self._is_silero_speech,
                    args=(data,)).start()
    def clear_audio_queue(self):
        """
        Safely empties the audio queue to ensure no remaining audio
        fragments get processed e.g. after waking up the recorder.
        """
        self.audio_buffer.clear()
        try:
            self.text_storage = []
            self.realtime_stabilized_text = ""
            self.realtime_stabilized_safetext = ""
            self.frames = []
            while True:
                self.audio_queue.get_nowait()
        except Exception:
            # PyTorch's mp.Queue doesn't have a specific Empty exception,
            # so we catch any exception that might occur when the queue
            # is empty
            pass
    def _is_voice_active(self):
        """
        Determine if voice is active.

        Returns:
            bool: True if voice is active, False otherwise.
        """
        return self.is_webrtc_speech_active and self.is_silero_speech_active
    def _set_state(self, new_state):
        """
        Update the current state of the recorder and execute
        corresponding state-change callbacks.

        Args:
            new_state (str): The new state to set.
        """
        # Check if the state has actually changed
        if new_state == self.state:
            return

        # Store the current state for later comparison
        old_state = self.state

        # Update to the new state
        self.state = new_state

        # Log the state change
        logging.info(f"State changed from '{old_state}' to '{new_state}'")

        # Execute callbacks based on transitioning FROM a particular state
        if old_state == "listening":
            if self.on_vad_detect_stop:
                self.on_vad_detect_stop()
        elif old_state == "wakeword":
            if self.on_wakeword_detection_end:
                self.on_wakeword_detection_end()

        # Execute callbacks based on transitioning TO a particular state
        if new_state == "listening":
            if self.on_vad_detect_start:
                self.on_vad_detect_start()
            self._set_spinner("speak now")
            if self.spinner and self.halo:
                self.halo._interval = 250
        elif new_state == "wakeword":
            if self.on_wakeword_detection_start:
                self.on_wakeword_detection_start()
            self._set_spinner(f"say {self.wake_words}")
            if self.spinner and self.halo:
                self.halo._interval = 500
        elif new_state == "transcribing":
            if self.on_transcription_start:
                self.on_transcription_start()
            self._set_spinner("transcribing")
            if self.spinner and self.halo:
                self.halo._interval = 50
        elif new_state == "recording":
            self._set_spinner("recording")
            if self.spinner and self.halo:
                self.halo._interval = 100
        elif new_state == "inactive":
            if self.spinner and self.halo:
                self.halo.stop()
            self.halo = None
    def _set_spinner(self, text):
        """
        Update the spinner's text or create a new
        spinner with the provided text.

        Args:
            text (str): The text to be displayed alongside the spinner.
        """
        if self.spinner:
            # If the Halo spinner doesn't exist, create and start it
            if self.halo is None:
                self.halo = halo.Halo(text=text)
                self.halo.start()
            # If the Halo spinner already exists, just update the text
            else:
                self.halo.text = text
    def _preprocess_output(self, text, preview=False):
        """
        Preprocesses the output text by removing any leading or trailing
        whitespace, converting all whitespace sequences to a single space
        character, and capitalizing the first character of the text.

        Args:
            text (str): The text to be preprocessed.

        Returns:
            str: The preprocessed text.
        """
        text = re.sub(r'\s+', ' ', text.strip())

        if self.ensure_sentence_starting_uppercase:
            if text:
                text = text[0].upper() + text[1:]

        # Ensure the text ends with a proper punctuation
        # if it ends with an alphanumeric character
        if not preview:
            if self.ensure_sentence_ends_with_period:
                if text and text[-1].isalnum():
                    text += '.'

        return text
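    # Illustrative example (assuming both ensure_* options are enabled):
    #
    #     _preprocess_output("  hello   world")        ->  "Hello world."
    #     _preprocess_output("  hello   world", True)  ->  "Hello world"
    #
    # With preview=True no trailing period is appended, since preview text
    # is still subject to change.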
    def _find_tail_match_in_text(self, text1, text2, length_of_match=10):
        """
        Find the position where the last 'n' characters of text1
        match with a substring in text2.

        This method takes two texts, extracts the last 'n' characters from
        text1 (where 'n' is determined by the variable 'length_of_match'), and
        searches for an occurrence of this substring in text2, starting from
        the end of text2 and moving towards the beginning.

        Parameters:
        - text1 (str): The text containing the substring that we want to find
          in text2.
        - text2 (str): The text in which we want to find the matching
          substring.
        - length_of_match (int): The length of the matching string that we are
          looking for.

        Returns:
            int: The position (0-based index) in text2 immediately after the
            end of the matching substring. If no match is found or either of
            the texts is too short, returns -1.
        """
        # Check if either of the texts is too short
        if len(text1) < length_of_match or len(text2) < length_of_match:
            return -1

        # The end portion of the first text that we want to compare
        target_substring = text1[-length_of_match:]

        # Loop through text2 from right to left
        for i in range(len(text2) - length_of_match + 1):
            # Extract the substring from text2
            # to compare with the target_substring
            current_substring = text2[len(text2) - i - length_of_match:
                                      len(text2) - i]

            # Compare the current_substring with the target_substring
            if current_substring == target_substring:
                # Position in text2 immediately after the match
                return len(text2) - i

        return -1
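    # Worked example: with text1 = "the quick brown fox" and
    # text2 = "quick brown fox jumps" (length_of_match=10), the 10-character
    # tail " brown fox" of text1 is found in text2 and the method returns 15,
    # the index just past the match, so text2[15:] == " jumps" is the freshly
    # transcribed suffix that _realtime_worker appends to the stable text.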
    def _on_realtime_transcription_stabilized(self, text):
        """
        Callback method invoked when the real-time transcription stabilizes.

        This method is called internally when the transcription text is
        considered "stable," meaning it's less likely to change significantly
        with additional audio input. It notifies any registered external
        listener about the stabilized text if recording is still ongoing.
        This is particularly useful for applications that need to display
        live transcription results to users and want to highlight parts of the
        transcription that are less likely to change.

        Args:
            text (str): The stabilized transcription text.
        """
        if self.on_realtime_transcription_stabilized:
            if self.is_recording:
                self.on_realtime_transcription_stabilized(text)

    def _on_realtime_transcription_update(self, text):
        """
        Callback method invoked when there's an update in the real-time
        transcription.

        This method is called internally whenever there's a change in the
        transcription text, notifying any registered external listener about
        the update if recording is still ongoing. This provides a mechanism
        for applications to receive and possibly display live transcription
        updates, which could be partial and still subject to change.

        Args:
            text (str): The updated transcription text.
        """
        if self.on_realtime_transcription_update:
            if self.is_recording:
                self.on_realtime_transcription_update(text)
    def __enter__(self):
        """
        Method to set up the context manager protocol.

        This enables the instance to be used in a `with` statement, ensuring
        proper resource management. When the `with` block is entered, this
        method is automatically called.

        Returns:
            self: The current instance of the class.
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Method to define behavior when the context manager protocol exits.

        This is called when exiting the `with` block and ensures that any
        necessary cleanup or resource release processes are executed, such as
        shutting down the system properly.

        Args:
            exc_type (Exception or None): The type of the exception that
                caused the context to be exited, if any.
            exc_value (Exception or None): The exception instance that caused
                the context to be exited, if any.
            traceback (Traceback or None): The traceback corresponding to the
                exception, if any.
        """
        self.shutdown()
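
# Illustrative usage sketch of the context-manager protocol above (this
# assumes the class is exported as AudioToTextRecorder, its public name in
# RealtimeSTT; constructor arguments are omitted):
#
#     with AudioToTextRecorder() as recorder:
#         print(recorder.text())
#
# __exit__ guarantees that shutdown() runs even if the body raises, so the
# reader and transcription processes are always terminated cleanly.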