  1. """
  2. The AudioToTextRecorder class in the provided code facilitates
  3. fast speech-to-text transcription.
  4. The class employs the faster_whisper library to transcribe the recorded audio
  5. into text using machine learning models, which can be run either on a GPU or
  6. CPU. Voice activity detection (VAD) is built in, meaning the software can
  7. automatically start or stop recording based on the presence or absence of
  8. speech. It integrates wake word detection through the pvporcupine library,
  9. allowing the software to initiate recording when a specific word or phrase
  10. is spoken. The system provides real-time feedback and can be further
  11. customized.
  12. Features:
  13. - Voice Activity Detection: Automatically starts/stops recording when speech
  14. is detected or when speech ends.
  15. - Wake Word Detection: Starts recording when a specified wake word (or words)
  16. is detected.
  17. - Event Callbacks: Customizable callbacks for when recording starts
  18. or finishes.
  19. - Fast Transcription: Returns the transcribed text from the audio as fast
  20. as possible.
  21. Author: Kolja Beigel
  22. """

from multiprocessing import Process, Pipe, Event, Manager
import faster_whisper
import collections
import numpy as np
import pvporcupine
import traceback
import threading
import webrtcvad
import itertools
import pyaudio
import logging
import struct
import torch
import halo
import time
import os
import re

INIT_MODEL_TRANSCRIPTION = "tiny"
INIT_MODEL_TRANSCRIPTION_REALTIME = "tiny"
INIT_REALTIME_PROCESSING_PAUSE = 0.2
INIT_SILERO_SENSITIVITY = 0.4
INIT_WEBRTC_SENSITIVITY = 3
INIT_POST_SPEECH_SILENCE_DURATION = 0.6
INIT_MIN_LENGTH_OF_RECORDING = 0.5
INIT_MIN_GAP_BETWEEN_RECORDINGS = 0
INIT_WAKE_WORDS_SENSITIVITY = 0.6
INIT_PRE_RECORDING_BUFFER_DURATION = 1.0
INIT_WAKE_WORD_ACTIVATION_DELAY = 0.0
INIT_WAKE_WORD_TIMEOUT = 5.0
ALLOWED_LATENCY_LIMIT = 10
TIME_SLEEP = 0.02
SAMPLE_RATE = 16000
BUFFER_SIZE = 512
INT16_MAX_ABS_VALUE = 32768.0
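
# Note on the chunking math implied by these defaults: each audio chunk
# covers BUFFER_SIZE / SAMPLE_RATE = 512 / 16000 = 0.032 s (32 ms), so the
# 1.0 s pre-recording buffer holds int(16000 // 512 * 1.0) = 31 chunks
# (see the deque maxlen computed in __init__ below).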


class AudioToTextRecorder:
    """
    A class responsible for capturing audio from the microphone, detecting
    voice activity, and then transcribing the captured audio using the
    `faster_whisper` model.
    """

    def __init__(self,
                 model: str = INIT_MODEL_TRANSCRIPTION,
                 language: str = "",
                 on_recording_start=None,
                 on_recording_stop=None,
                 on_transcription_start=None,
                 ensure_sentence_starting_uppercase=True,
                 ensure_sentence_ends_with_period=True,
                 use_microphone=True,
                 spinner=True,
                 level=logging.WARNING,

                 # Realtime transcription parameters
                 enable_realtime_transcription=False,
                 realtime_model_type=INIT_MODEL_TRANSCRIPTION_REALTIME,
                 realtime_processing_pause=INIT_REALTIME_PROCESSING_PAUSE,
                 on_realtime_transcription_update=None,
                 on_realtime_transcription_stabilized=None,

                 # Voice activation parameters
                 silero_sensitivity: float = INIT_SILERO_SENSITIVITY,
                 silero_use_onnx: bool = False,
                 webrtc_sensitivity: int = INIT_WEBRTC_SENSITIVITY,
                 post_speech_silence_duration: float = (
                     INIT_POST_SPEECH_SILENCE_DURATION
                 ),
                 min_length_of_recording: float = (
                     INIT_MIN_LENGTH_OF_RECORDING
                 ),
                 min_gap_between_recordings: float = (
                     INIT_MIN_GAP_BETWEEN_RECORDINGS
                 ),
                 pre_recording_buffer_duration: float = (
                     INIT_PRE_RECORDING_BUFFER_DURATION
                 ),
                 on_vad_detect_start=None,
                 on_vad_detect_stop=None,

                 # Wake word parameters
                 wake_words: str = "",
                 wake_words_sensitivity: float = INIT_WAKE_WORDS_SENSITIVITY,
                 wake_word_activation_delay: float = (
                     INIT_WAKE_WORD_ACTIVATION_DELAY
                 ),
                 wake_word_timeout: float = INIT_WAKE_WORD_TIMEOUT,
                 on_wakeword_detected=None,
                 on_wakeword_timeout=None,
                 on_wakeword_detection_start=None,
                 on_wakeword_detection_end=None,
                 ):
  110. """
  111. Initializes an audio recorder and transcription
  112. and wake word detection.
  113. Args:
  114. - model (str, default="tiny"): Specifies the size of the transcription
  115. model to use or the path to a converted model directory.
  116. Valid options are 'tiny', 'tiny.en', 'base', 'base.en',
  117. 'small', 'small.en', 'medium', 'medium.en', 'large-v1',
  118. 'large-v2'.
  119. If a specific size is provided, the model is downloaded
  120. from the Hugging Face Hub.
  121. - language (str, default=""): Language code for speech-to-text engine.
  122. If not specified, the model will attempt to detect the language
  123. automatically.
  124. - on_recording_start (callable, default=None): Callback function to be
  125. called when recording of audio to be transcripted starts.
  126. - on_recording_stop (callable, default=None): Callback function to be
  127. called when recording of audio to be transcripted stops.
  128. - on_transcription_start (callable, default=None): Callback function
  129. to be called when transcription of audio to text starts.
  130. - ensure_sentence_starting_uppercase (bool, default=True): Ensures
  131. that every sentence detected by the algorithm starts with an
  132. uppercase letter.
  133. - ensure_sentence_ends_with_period (bool, default=True): Ensures that
  134. every sentence that doesn't end with punctuation such as "?", "!"
  135. ends with a period
  136. - use_microphone (bool, default=True): Specifies whether to use the
  137. microphone as the audio input source. If set to False, the
  138. audio input source will be the audio data sent through the
  139. feed_audio() method.
  140. - spinner (bool, default=True): Show spinner animation with current
  141. state.
  142. - level (int, default=logging.WARNING): Logging level.
  143. - enable_realtime_transcription (bool, default=False): Enables or
  144. disables real-time transcription of audio. When set to True, the
  145. audio will be transcribed continuously as it is being recorded.
  146. - realtime_model_type (str, default="tiny"): Specifies the machine
  147. learning model to be used for real-time transcription. Valid
  148. options include 'tiny', 'tiny.en', 'base', 'base.en', 'small',
  149. 'small.en', 'medium', 'medium.en', 'large-v1', 'large-v2'.
  150. - realtime_processing_pause (float, default=0.1): Specifies the time
  151. interval in seconds after a chunk of audio gets transcribed. Lower
  152. values will result in more "real-time" (frequent) transcription
  153. updates but may increase computational load.
  154. - on_realtime_transcription_update = A callback function that is
  155. triggered whenever there's an update in the real-time
  156. transcription. The function is called with the newly transcribed
  157. text as its argument.
  158. - on_realtime_transcription_stabilized = A callback function that is
  159. triggered when the transcribed text stabilizes in quality. The
  160. stabilized text is generally more accurate but may arrive with a
  161. slight delay compared to the regular real-time updates.
  162. - silero_sensitivity (float, default=SILERO_SENSITIVITY): Sensitivity
  163. for the Silero Voice Activity Detection model ranging from 0
  164. (least sensitive) to 1 (most sensitive). Default is 0.5.
  165. - silero_use_onnx (bool, default=False): Enables usage of the
  166. pre-trained model from Silero in the ONNX (Open Neural Network
  167. Exchange) format instead of the PyTorch format. This is
  168. recommended for faster performance.
  169. - webrtc_sensitivity (int, default=WEBRTC_SENSITIVITY): Sensitivity
  170. for the WebRTC Voice Activity Detection engine ranging from 0
  171. (least aggressive / most sensitive) to 3 (most aggressive,
  172. least sensitive). Default is 3.
  173. - post_speech_silence_duration (float, default=0.2): Duration in
  174. seconds of silence that must follow speech before the recording
  175. is considered to be completed. This ensures that any brief
  176. pauses during speech don't prematurely end the recording.
  177. - min_gap_between_recordings (float, default=1.0): Specifies the
  178. minimum time interval in seconds that should exist between the
  179. end of one recording session and the beginning of another to
  180. prevent rapid consecutive recordings.
  181. - min_length_of_recording (float, default=1.0): Specifies the minimum
  182. duration in seconds that a recording session should last to ensure
  183. meaningful audio capture, preventing excessively short or
  184. fragmented recordings.
  185. - pre_recording_buffer_duration (float, default=0.2): Duration in
  186. seconds for the audio buffer to maintain pre-roll audio
  187. (compensates speech activity detection latency)
  188. - on_vad_detect_start (callable, default=None): Callback function to
  189. be called when the system listens for voice activity.
  190. - on_vad_detect_stop (callable, default=None): Callback function to be
  191. called when the system stops listening for voice activity.
  192. - wake_words (str, default=""): Comma-separated string of wake words to
  193. initiate recording. Supported wake words include:
  194. 'alexa', 'americano', 'blueberry', 'bumblebee', 'computer',
  195. 'grapefruits', 'grasshopper', 'hey google', 'hey siri', 'jarvis',
  196. 'ok google', 'picovoice', 'porcupine', 'terminator'.
  197. - wake_words_sensitivity (float, default=0.5): Sensitivity for wake
  198. word detection, ranging from 0 (least sensitive) to 1 (most
  199. sensitive). Default is 0.5.
  200. - wake_word_activation_delay (float, default=0): Duration in seconds
  201. after the start of monitoring before the system switches to wake
  202. word activation if no voice is initially detected. If set to
  203. zero, the system uses wake word activation immediately.
  204. - wake_word_timeout (float, default=5): Duration in seconds after a
  205. wake word is recognized. If no subsequent voice activity is
  206. detected within this window, the system transitions back to an
  207. inactive state, awaiting the next wake word or voice activation.
  208. - on_wakeword_detected (callable, default=None): Callback function to
  209. be called when a wake word is detected.
  210. - on_wakeword_timeout (callable, default=None): Callback function to
  211. be called when the system goes back to an inactive state after when
  212. no speech was detected after wake word activation
  213. - on_wakeword_detection_start (callable, default=None): Callback
  214. function to be called when the system starts to listen for wake
  215. words
  216. - on_wakeword_detection_end (callable, default=None): Callback
  217. function to be called when the system stops to listen for
  218. wake words (e.g. because of timeout or wake word detected)
  219. Raises:
  220. Exception: Errors related to initializing transcription
  221. model, wake word detection, or audio recording.
  222. """
        self.language = language
        self.wake_words = wake_words
        self.wake_word_activation_delay = wake_word_activation_delay
        self.wake_word_timeout = wake_word_timeout
        self.ensure_sentence_starting_uppercase = (
            ensure_sentence_starting_uppercase
        )
        self.ensure_sentence_ends_with_period = (
            ensure_sentence_ends_with_period
        )
        self.min_gap_between_recordings = min_gap_between_recordings
        self.min_length_of_recording = min_length_of_recording
        self.pre_recording_buffer_duration = pre_recording_buffer_duration
        self.post_speech_silence_duration = post_speech_silence_duration
        self.on_recording_start = on_recording_start
        self.on_recording_stop = on_recording_stop
        self.on_wakeword_detected = on_wakeword_detected
        self.on_wakeword_timeout = on_wakeword_timeout
        self.on_vad_detect_start = on_vad_detect_start
        self.on_vad_detect_stop = on_vad_detect_stop
        self.on_wakeword_detection_start = on_wakeword_detection_start
        self.on_wakeword_detection_end = on_wakeword_detection_end
        self.on_transcription_start = on_transcription_start
        self.enable_realtime_transcription = enable_realtime_transcription
        self.realtime_model_type = realtime_model_type
        self.realtime_processing_pause = realtime_processing_pause
        self.on_realtime_transcription_update = (
            on_realtime_transcription_update
        )
        self.on_realtime_transcription_stabilized = (
            on_realtime_transcription_stabilized
        )
        self.allowed_latency_limit = ALLOWED_LATENCY_LIMIT

        self.level = level
        manager = Manager()
        self.audio_queue = manager.Queue()
        self.buffer_size = BUFFER_SIZE
        self.sample_rate = SAMPLE_RATE
        self.recording_start_time = 0
        self.recording_stop_time = 0
        self.wake_word_detect_time = 0
        self.silero_check_time = 0
        self.silero_working = False
        self.speech_end_silence_start = 0
        self.silero_sensitivity = silero_sensitivity
        self.listen_start = 0
        self.spinner = spinner
        self.halo = None
        self.state = "inactive"
        self.wakeword_detected = False
        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.is_webrtc_speech_active = False
        self.is_silero_speech_active = False
        self.recording_thread = None
        self.realtime_thread = None
        self.audio_interface = None
        self.audio = None
        self.stream = None
        self.start_recording_event = threading.Event()
        self.stop_recording_event = threading.Event()

        # Initialize the logging configuration with the specified level
        log_format = 'RealTimeSTT: %(name)s - %(levelname)s - %(message)s'

        # Create a logger
        logger = logging.getLogger()
        logger.setLevel(level)  # Set the root logger's level

        # Create a file handler and set its level
        file_handler = logging.FileHandler('realtimesst.log')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(log_format))

        # Create a console handler and set its level
        console_handler = logging.StreamHandler()
        console_handler.setLevel(level)
        console_handler.setFormatter(logging.Formatter(log_format))

        # Add the handlers to the logger
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)

        self.is_shut_down = False
        self.shutdown_event = Event()

        logging.info("Starting RealTimeSTT")

        # Start transcription process
        self.interrupt_stop_event = Event()
        self.main_transcription_ready_event = Event()
        self.parent_transcription_pipe, child_transcription_pipe = Pipe()
        self.transcript_process = Process(
            target=AudioToTextRecorder._transcription_worker,
            args=(
                child_transcription_pipe,
                model,
                self.main_transcription_ready_event,
                self.shutdown_event,
                self.interrupt_stop_event
            )
        )
        self.transcript_process.start()

        # Start audio data reading process
        if use_microphone:
            self.reader_process = Process(
                target=AudioToTextRecorder._audio_data_worker,
                args=(
                    self.audio_queue,
                    self.sample_rate,
                    self.buffer_size,
                    self.shutdown_event,
                    self.interrupt_stop_event
                )
            )
            self.reader_process.start()

        # Initialize the realtime transcription model
        if self.enable_realtime_transcription:
            try:
                logging.info("Initializing faster_whisper realtime "
                             f"transcription model {self.realtime_model_type}"
                             )
                self.realtime_model_type = faster_whisper.WhisperModel(
                    model_size_or_path=self.realtime_model_type,
                    device='cuda' if torch.cuda.is_available() else 'cpu'
                )

            except Exception as e:
                logging.exception("Error initializing faster_whisper "
                                  f"realtime transcription model: {e}"
                                  )
                raise

            logging.debug("Faster_whisper realtime speech to text "
                          "transcription model initialized successfully")

        # Setup wake word detection
        if wake_words:
            self.wake_words_list = [
                word.strip() for word in wake_words.lower().split(',')
            ]
            sensitivity_list = [
                float(wake_words_sensitivity)
                for _ in range(len(self.wake_words_list))
            ]

            try:
                self.porcupine = pvporcupine.create(
                    keywords=self.wake_words_list,
                    sensitivities=sensitivity_list
                )
                self.buffer_size = self.porcupine.frame_length
                self.sample_rate = self.porcupine.sample_rate

            except Exception as e:
                logging.exception("Error initializing porcupine "
                                  f"wake word detection engine: {e}"
                                  )
                raise

            logging.debug("Porcupine wake word detection "
                          "engine initialized successfully"
                          )

        # Setup voice activity detection model WebRTC
        try:
            logging.info("Initializing WebRTC voice with "
                         f"Sensitivity {webrtc_sensitivity}"
                         )
            self.webrtc_vad_model = webrtcvad.Vad()
            self.webrtc_vad_model.set_mode(webrtc_sensitivity)

        except Exception as e:
            logging.exception("Error initializing WebRTC voice "
                              f"activity detection engine: {e}"
                              )
            raise

        logging.debug("WebRTC VAD voice activity detection "
                      "engine initialized successfully"
                      )

        # Setup voice activity detection model Silero VAD
        try:
            self.silero_vad_model, _ = torch.hub.load(
                repo_or_dir="snakers4/silero-vad",
                model="silero_vad",
                verbose=False,
                onnx=silero_use_onnx
            )

        except Exception as e:
            logging.exception(f"Error initializing Silero VAD "
                              f"voice activity detection engine: {e}"
                              )
            raise

        logging.debug("Silero VAD voice activity detection "
                      "engine initialized successfully"
                      )

        self.audio_buffer = collections.deque(
            maxlen=int((self.sample_rate // self.buffer_size) *
                       self.pre_recording_buffer_duration)
        )
        self.frames = []

        # Recording control flags
        self.is_recording = False
        self.is_running = True
        self.start_recording_on_voice_activity = False
        self.stop_recording_on_voice_deactivity = False

        # Start the recording worker thread
        self.recording_thread = threading.Thread(target=self._recording_worker)
        self.recording_thread.daemon = True
        self.recording_thread.start()

        # Start the realtime transcription worker thread
        self.realtime_thread = threading.Thread(target=self._realtime_worker)
        self.realtime_thread.daemon = True
        self.realtime_thread.start()

        # Wait for transcription models to start
        logging.debug('Waiting for main transcription model to start')
        self.main_transcription_ready_event.wait()
        logging.debug('Main transcription model ready')

        logging.debug('RealtimeSTT initialization completed successfully')

        logging.debug(f"buffer_size: {self.buffer_size}")
        logging.debug(f"sample_rate: {self.sample_rate}")
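
    # Process/thread layout as wired above: _recording_worker and
    # _realtime_worker run as daemon threads in the main process, while
    # _transcription_worker and (if use_microphone) _audio_data_worker run
    # as separate processes connected via a Pipe and a managed Queue.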

    @staticmethod
    def _transcription_worker(conn,
                              model_path,
                              ready_event,
                              shutdown_event,
                              interrupt_stop_event):
        """
        Worker method that handles the continuous
        process of transcribing audio data.

        This method runs in a separate process and is responsible for:
        - Initializing the `faster_whisper` model used for transcription.
        - Receiving audio data sent through a pipe and using the model
          to transcribe it.
        - Sending transcription results back through the pipe.
        - Continuously checking for a shutdown event to gracefully
          terminate the transcription process.

        Args:
            conn (multiprocessing.Connection): The connection endpoint used
              for receiving audio data and sending transcription results.
            model_path (str): The path to the pre-trained faster_whisper model
              for transcription.
            ready_event (multiprocessing.Event): An event that is set when the
              transcription model is successfully initialized and ready.
            shutdown_event (multiprocessing.Event): An event that, when set,
              signals this worker method to terminate.
            interrupt_stop_event (multiprocessing.Event): An event that is set
              when the worker is stopped by a KeyboardInterrupt.

        Raises:
            Exception: If there is an error while initializing the
            transcription model.
        """
        logging.info("Initializing faster_whisper "
                     f"main transcription model {model_path}"
                     )

        try:
            model = faster_whisper.WhisperModel(
                model_size_or_path=model_path,
                device='cuda' if torch.cuda.is_available() else 'cpu'
            )

        except Exception as e:
            logging.exception("Error initializing main "
                              f"faster_whisper transcription model: {e}"
                              )
            raise

        ready_event.set()

        logging.debug("Faster_whisper main speech to text "
                      "transcription model initialized successfully"
                      )

        while not shutdown_event.is_set():
            try:
                if conn.poll(0.5):
                    audio, language = conn.recv()
                    try:
                        segments = model.transcribe(
                            audio, language=language if language else None
                        )
                        segments = segments[0]
                        transcription = " ".join(seg.text for seg in segments)
                        transcription = transcription.strip()
                        conn.send(('success', transcription))
                    except faster_whisper.WhisperError as e:
                        logging.error(f"Whisper transcription error: {e}")
                        conn.send(('error', str(e)))
                    except Exception as e:
                        logging.error(f"General transcription error: {e}")
                        conn.send(('error', str(e)))
                else:
                    # If there's no data, sleep to prevent busy waiting
                    time.sleep(0.02)
            except KeyboardInterrupt:
                interrupt_stop_event.set()
                logging.debug("Transcription worker process "
                              "finished due to KeyboardInterrupt"
                              )
                break

    @staticmethod
    def _audio_data_worker(audio_queue,
                           sample_rate,
                           buffer_size,
                           shutdown_event,
                           interrupt_stop_event):
        """
        Worker method that handles the audio recording process.

        This method runs in a separate process and is responsible for:
        - Setting up the audio input stream for recording.
        - Continuously reading audio data from the input stream
          and placing it in a queue.
        - Handling errors during the recording process, including
          input overflow.
        - Gracefully terminating the recording process when a shutdown
          event is set.

        Args:
            audio_queue (queue.Queue): A queue where recorded audio
              data is placed.
            sample_rate (int): The sample rate of the audio input stream.
            buffer_size (int): The size of the buffer used in the audio
              input stream.
            shutdown_event (multiprocessing.Event): An event that, when set,
              signals this worker method to terminate.
            interrupt_stop_event (multiprocessing.Event): An event that is set
              when the worker is stopped by a KeyboardInterrupt.

        Raises:
            Exception: If there is an error while initializing the audio
            recording.
        """
        logging.info("Initializing audio recording "
                     "(creating pyAudio input stream)"
                     )

        try:
            audio_interface = pyaudio.PyAudio()
            stream = audio_interface.open(rate=sample_rate,
                                          format=pyaudio.paInt16,
                                          channels=1,
                                          input=True,
                                          frames_per_buffer=buffer_size
                                          )

        except Exception as e:
            logging.exception("Error initializing pyaudio "
                              f"audio recording: {e}"
                              )
            raise

        logging.debug("Audio recording (pyAudio input "
                      "stream) initialized successfully"
                      )

        try:
            while not shutdown_event.is_set():
                try:
                    data = stream.read(buffer_size)

                except OSError as e:
                    if e.errno == pyaudio.paInputOverflowed:
                        logging.warning("Input overflowed. Frame dropped.")
                    else:
                        logging.error(f"Error during recording: {e}")
                        tb_str = traceback.format_exc()
                        logging.error(f"Traceback: {tb_str}")
                    continue

                except Exception as e:
                    logging.error(f"Error during recording: {e}")
                    tb_str = traceback.format_exc()
                    logging.error(f"Traceback: {tb_str}")
                    continue

                audio_queue.put(data)

        except KeyboardInterrupt:
            interrupt_stop_event.set()
            logging.debug("Audio data worker process "
                          "finished due to KeyboardInterrupt"
                          )

        finally:
            stream.stop_stream()
            stream.close()
            audio_interface.terminate()

    def wait_audio(self):
        """
        Waits for the start and completion of the audio recording process.

        This method is responsible for:
        - Waiting for voice activity to begin recording if not yet started.
        - Waiting for voice inactivity to complete the recording.
        - Setting the audio buffer from the recorded frames.
        - Resetting recording-related attributes.

        Side effects:
        - Updates the state of the instance.
        - Modifies the audio attribute to contain the processed audio data.
        """
        self.listen_start = time.time()

        # If not yet started recording, wait for voice activity to initiate.
        if not self.is_recording and not self.frames:
            self._set_state("listening")
            self.start_recording_on_voice_activity = True

            # Wait until recording starts
            while not self.interrupt_stop_event.is_set():
                if self.start_recording_event.wait(timeout=0.5):
                    break

        # If recording is ongoing, wait for voice inactivity
        # to finish recording.
        if self.is_recording:
            self.stop_recording_on_voice_deactivity = True

            # Wait until recording stops
            while not self.interrupt_stop_event.is_set():
                if self.stop_recording_event.wait(timeout=0.5):
                    break

        # Convert recorded frames to the appropriate audio format.
        audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)
        self.audio = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE
        self.frames.clear()

        # Reset recording-related timestamps
        self.recording_stop_time = 0
        self.listen_start = 0

        self._set_state("inactive")

    def transcribe(self):
        """
        Transcribes audio captured by this class instance using the
        `faster_whisper` model.

        - Automatically starts recording upon voice activity if not manually
          started using `recorder.start()`.
        - Automatically stops recording upon voice deactivity if not manually
          stopped with `recorder.stop()`.
        - Processes the recorded audio to generate transcription.

        Returns:
            str: The transcription of the recorded audio.

        Raises:
            Exception: If there is an error during the transcription process.
        """
        self._set_state("transcribing")
        self.parent_transcription_pipe.send((self.audio, self.language))
        status, result = self.parent_transcription_pipe.recv()

        self._set_state("inactive")
        if status == 'success':
            return self._preprocess_output(result)
        else:
            logging.error(result)
            raise Exception(result)

    def text(self,
             on_transcription_finished=None,
             ):
        """
        Transcribes audio captured by this class instance
        using the `faster_whisper` model.

        - Automatically starts recording upon voice activity if not manually
          started using `recorder.start()`.
        - Automatically stops recording upon voice deactivity if not manually
          stopped with `recorder.stop()`.
        - Processes the recorded audio to generate transcription.

        Args:
            on_transcription_finished (callable, optional): Callback function
              to be executed when transcription is ready.
              If provided, transcription will be performed asynchronously,
              and the callback will receive the transcription as its argument.
              If omitted, the transcription will be performed synchronously,
              and the result will be returned.

        Returns (if no callback is set):
            str: The transcription of the recorded audio.
        """
        self.wait_audio()

        if self.is_shut_down or self.interrupt_stop_event.is_set():
            return ""

        if on_transcription_finished:
            # Run transcribe() inside the thread so the callback path is
            # actually asynchronous (transcribe() itself blocks).
            threading.Thread(
                target=lambda: on_transcription_finished(self.transcribe())
            ).start()
        else:
            return self.transcribe()
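
    # Both call styles of text(), as a minimal sketch (handle_text is a
    # hypothetical callback name):
    #
    #     print(recorder.text())  # synchronous: blocks, returns the text
    #
    #     def handle_text(transcription):
    #         print(transcription)
    #     recorder.text(on_transcription_finished=handle_text)  # async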

    def start(self):
        """
        Starts recording audio directly without waiting for voice activity.
        """
        # Ensure there's a minimum interval
        # between stopping and starting recording
        if (time.time() - self.recording_stop_time
                < self.min_gap_between_recordings):
            logging.info("Attempted to start recording "
                         "too soon after stopping."
                         )
            return self

        logging.info("recording started")
        self._set_state("recording")
        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.wakeword_detected = False
        self.wake_word_detect_time = 0
        self.frames = []
        self.is_recording = True
        self.recording_start_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False
        self.stop_recording_event.clear()
        self.start_recording_event.set()

        if self.on_recording_start:
            self.on_recording_start()

        return self

    def stop(self):
        """
        Stops recording audio.
        """
        # Ensure there's a minimum interval
        # between starting and stopping recording
        if (time.time() - self.recording_start_time
                < self.min_length_of_recording):
            logging.info("Attempted to stop recording "
                         "too soon after starting."
                         )
            return self

        logging.info("recording stopped")
        self.is_recording = False
        self.recording_stop_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False
        self.silero_check_time = 0
        self.start_recording_event.clear()
        self.stop_recording_event.set()

        if self.on_recording_stop:
            self.on_recording_stop()

        return self

    def feed_audio(self, chunk):
        """
        Feed an audio chunk into the processing pipeline. Chunks are
        accumulated until the buffer size is reached, and then the accumulated
        data is fed into the audio_queue.
        """
        # Check if the buffer attribute exists; if not, initialize it
        if not hasattr(self, 'buffer'):
            self.buffer = bytearray()

        # Append the chunk to the buffer
        self.buffer += chunk
        buf_size = 2 * self.buffer_size  # silero complains if too short

        # Check if the buffer has reached or exceeded buf_size
        while len(self.buffer) >= buf_size:
            # Extract a buf_size slice of data from the buffer
            to_process = self.buffer[:buf_size]
            self.buffer = self.buffer[buf_size:]

            # Feed the extracted data to the audio_queue
            self.audio_queue.put(to_process)
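
    # A minimal sketch of feeding external audio instead of the microphone
    # (assumes use_microphone=False and 16 kHz mono int16 PCM, matching
    # SAMPLE_RATE and pyaudio.paInt16; 'speech.raw' is a hypothetical file):
    #
    #     recorder = AudioToTextRecorder(use_microphone=False)
    #     with open('speech.raw', 'rb') as f:
    #         while (chunk := f.read(1024)):
    #             recorder.feed_audio(chunk)
    #     print(recorder.text())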

    def shutdown(self):
        """
        Safely shuts down the audio recording by stopping the
        recording worker and closing the audio stream.
        """
        # Force wait_audio() and text() to exit
        self.is_shut_down = True
        self.start_recording_event.set()
        self.stop_recording_event.set()

        self.shutdown_event.set()
        self.is_recording = False
        self.is_running = False

        logging.debug('Finishing recording thread')
        if self.recording_thread:
            self.recording_thread.join()

        logging.debug('Terminating reader process')
        # The reader process only exists when use_microphone=True.
        if hasattr(self, 'reader_process'):
            # Give it some time to finish the loop and clean up.
            self.reader_process.join(timeout=10)

            if self.reader_process.is_alive():
                logging.warning("Reader process did not terminate "
                                "in time. Terminating forcefully."
                                )
                self.reader_process.terminate()

        logging.debug('Terminating transcription process')
        self.transcript_process.join(timeout=10)

        if self.transcript_process.is_alive():
            logging.warning("Transcript process did not terminate "
                            "in time. Terminating forcefully."
                            )
            self.transcript_process.terminate()

        self.parent_transcription_pipe.close()

        logging.debug('Finishing realtime thread')
        if self.realtime_thread:
            self.realtime_thread.join()

    def _recording_worker(self):
        """
        The main worker method which constantly monitors the audio
        input for voice activity and accordingly starts/stops the recording.
        """
        logging.debug('Starting recording worker')

        try:
            was_recording = False
            delay_was_passed = False

            # Continuously monitor audio for voice activity
            while self.is_running:

                try:
                    data = self.audio_queue.get()

                    # Handle queue overflow
                    queue_overflow_logged = False
                    while (self.audio_queue.qsize() >
                           self.allowed_latency_limit):
                        if not queue_overflow_logged:
                            logging.warning("Audio queue size exceeds latency "
                                            "limit. Current size: "
                                            f"{self.audio_queue.qsize()}. "
                                            "Discarding old audio chunks."
                                            )
                            queue_overflow_logged = True
                        data = self.audio_queue.get()

                except BrokenPipeError:
                    logging.error("BrokenPipeError _recording_worker")
                    self.is_running = False
                    break

                if not self.is_recording:
                    # Handle not recording state
                    time_since_listen_start = (time.time() - self.listen_start
                                               if self.listen_start else 0)

                    wake_word_activation_delay_passed = (
                        time_since_listen_start >
                        self.wake_word_activation_delay
                    )

                    # Handle wake-word timeout callback
                    if wake_word_activation_delay_passed \
                            and not delay_was_passed:

                        if self.wake_words and self.wake_word_activation_delay:
                            if self.on_wakeword_timeout:
                                self.on_wakeword_timeout()
                    delay_was_passed = wake_word_activation_delay_passed

                    # Set state and spinner text
                    if not self.recording_stop_time:
                        if self.wake_words \
                                and wake_word_activation_delay_passed \
                                and not self.wakeword_detected:
                            self._set_state("wakeword")
                        else:
                            if self.listen_start:
                                self._set_state("listening")
                            else:
                                self._set_state("inactive")

                    # Detect wake words if applicable
                    if self.wake_words and wake_word_activation_delay_passed:
                        try:
                            pcm = struct.unpack_from(
                                "h" * self.buffer_size,
                                data
                            )
                            wakeword_index = self.porcupine.process(pcm)

                        except struct.error:
                            logging.error("Error unpacking audio data "
                                          "for wake word processing.")
                            continue

                        except Exception as e:
                            logging.error(f"Wake word processing error: {e}")
                            continue

                        # If a wake word is detected
                        if wakeword_index >= 0:

                            # Remove the wake word from the recording
                            samples_for_0_1_sec = int(self.sample_rate * 0.1)
                            start_index = max(
                                0,
                                len(self.audio_buffer) - samples_for_0_1_sec
                                )
                            temp_samples = collections.deque(
                                itertools.islice(
                                    self.audio_buffer,
                                    start_index,
                                    None)
                                )
                            self.audio_buffer.clear()
                            self.audio_buffer.extend(temp_samples)

                            self.wake_word_detect_time = time.time()
                            self.wakeword_detected = True
                            if self.on_wakeword_detected:
                                self.on_wakeword_detected()

                    # Check for voice activity to
                    # trigger the start of recording
                    if ((not self.wake_words
                         or not wake_word_activation_delay_passed)
                            and self.start_recording_on_voice_activity) \
                            or self.wakeword_detected:

                        if self._is_voice_active():
                            logging.info("voice activity detected")

                            self.start()

                            if self.is_recording:
                                self.start_recording_on_voice_activity = False

                                # Add the buffered audio
                                # to the recording frames
                                self.frames.extend(list(self.audio_buffer))
                                self.audio_buffer.clear()

                            self.silero_vad_model.reset_states()
                        else:
                            data_copy = data[:]
                            self._check_voice_activity(data_copy)

                    self.speech_end_silence_start = 0

                else:
                    # If we are currently recording

                    # Stop the recording if silence is detected after speech
                    if self.stop_recording_on_voice_deactivity:

                        if not self._is_webrtc_speech(data, True):

                            # Voice deactivity was detected, so we start
                            # measuring silence time before stopping recording
                            if self.speech_end_silence_start == 0:
                                self.speech_end_silence_start = time.time()

                        else:
                            self.speech_end_silence_start = 0

                        # Wait for silence to stop recording after speech
                        if self.speech_end_silence_start and time.time() - \
                                self.speech_end_silence_start > \
                                self.post_speech_silence_duration:
                            logging.info("voice deactivity detected")
                            self.stop()

                if not self.is_recording and was_recording:
                    # Reset after stopping recording to ensure a clean state
                    self.stop_recording_on_voice_deactivity = False

                if time.time() - self.silero_check_time > 0.1:
                    self.silero_check_time = 0

                # Handle wake word timeout (waited too long to initiate
                # speech after wake word detection)
                if self.wake_word_detect_time and time.time() - \
                        self.wake_word_detect_time > self.wake_word_timeout:

                    self.wake_word_detect_time = 0
                    if self.wakeword_detected and self.on_wakeword_timeout:
                        self.on_wakeword_timeout()
                    self.wakeword_detected = False

                was_recording = self.is_recording

                if self.is_recording:
                    self.frames.append(data)

                if not self.is_recording or self.speech_end_silence_start:
                    self.audio_buffer.append(data)

        except Exception as e:
            if not self.interrupt_stop_event.is_set():
                logging.error(f"Unhandled exception in _recording_worker: "
                              f"{e}")
                raise

    def _realtime_worker(self):
        """
        Performs real-time transcription if the feature is enabled.

        The method is responsible for transcribing recorded audio frames
        in real-time based on the specified resolution interval.
        The transcribed text is stored in `self.realtime_transcription_text`
        and a callback
        function is invoked with this text if specified.
        """
        try:
            logging.debug('Starting realtime worker')

            # Return immediately if real-time transcription is not enabled
            if not self.enable_realtime_transcription:
                return

            # Continue running as long as the main process is active
            while self.is_running:

                # Check if the recording is active
                if self.is_recording:

                    # Sleep for the duration of the transcription resolution
                    time.sleep(self.realtime_processing_pause)

                    # Convert the buffer frames to a NumPy array
                    audio_array = np.frombuffer(
                        b''.join(self.frames),
                        dtype=np.int16
                        )

                    # Normalize the array to a [-1, 1] range
                    audio_array = audio_array.astype(np.float32) / \
                        INT16_MAX_ABS_VALUE

                    # Perform transcription and assemble the text
                    segments = self.realtime_model_type.transcribe(
                        audio_array,
                        language=self.language if self.language else None
                    )

                    # double check recording state
                    # because it could have changed mid-transcription
                    if self.is_recording and time.time() - \
                            self.recording_start_time > 0.5:

                        logging.debug('Starting realtime transcription')
                        self.realtime_transcription_text = " ".join(
                            seg.text for seg in segments[0]
                        )
                        self.realtime_transcription_text = \
                            self.realtime_transcription_text.strip()

                        self.text_storage.append(
                            self.realtime_transcription_text
                            )

                        # Take the last two texts in storage, if they exist
                        if len(self.text_storage) >= 2:
                            last_two_texts = self.text_storage[-2:]

                            # Find the longest common prefix
                            # between the two texts
                            prefix = os.path.commonprefix(
                                [last_two_texts[0], last_two_texts[1]]
                                )

                            # This prefix is the text that was transcribed
                            # two times in the same way
                            # Store as "safely detected text"
                            if len(prefix) >= \
                                    len(self.realtime_stabilized_safetext):

                                # Only store when longer than the previous
                                # as additional security
                                self.realtime_stabilized_safetext = prefix

                        # Find parts of the stabilized text
                        # in the freshly transcribed text
                        matching_pos = self._find_tail_match_in_text(
                            self.realtime_stabilized_safetext,
                            self.realtime_transcription_text
                            )

                        if matching_pos < 0:
                            if self.realtime_stabilized_safetext:
                                self._on_realtime_transcription_stabilized(
                                    self._preprocess_output(
                                        self.realtime_stabilized_safetext,
                                        True
                                    )
                                )
                            else:
                                self._on_realtime_transcription_stabilized(
                                    self._preprocess_output(
                                        self.realtime_transcription_text,
                                        True
                                    )
                                )
                        else:
                            # We found parts of the stabilized text
                            # in the transcribed text
                            # We now take the stabilized text
                            # and add only the freshly transcribed part to it
                            output_text = self.realtime_stabilized_safetext + \
                                self.realtime_transcription_text[matching_pos:]

                            # This yields us the "left" text part as stabilized
                            # AND at the same time delivers fresh detected
                            # parts on the first run without the need for
                            # two transcriptions
                            self._on_realtime_transcription_stabilized(
                                self._preprocess_output(output_text, True)
                                )

                        # Invoke the callback with the transcribed text
                        self._on_realtime_transcription_update(
                            self._preprocess_output(
                                self.realtime_transcription_text,
                                True
                            )
                        )

                # If not recording, sleep briefly before checking again
                else:
                    time.sleep(TIME_SLEEP)

        except Exception as e:
            logging.error(f"Unhandled exception in _realtime_worker: {e}")
            raise

    def _is_silero_speech(self, data):
        """
        Returns true if speech is detected in the provided audio data

        Args:
            data (bytes): raw bytes of audio data (1024 raw bytes with
            16000 sample rate and 16 bits per sample)
        """
        self.silero_working = True
        audio_chunk = np.frombuffer(data, dtype=np.int16)
        audio_chunk = audio_chunk.astype(np.float32) / INT16_MAX_ABS_VALUE
        vad_prob = self.silero_vad_model(
            torch.from_numpy(audio_chunk),
            SAMPLE_RATE).item()
        is_silero_speech_active = vad_prob > (1 - self.silero_sensitivity)
        if is_silero_speech_active:
            self.is_silero_speech_active = True
        self.silero_working = False
        return is_silero_speech_active

    def _is_webrtc_speech(self, data, all_frames_must_be_true=False):
        """
        Returns true if speech is detected in the provided audio data

        Args:
            data (bytes): raw bytes of audio data (1024 raw bytes with
            16000 sample rate and 16 bits per sample)
        """
        # Number of samples in a 10 ms frame
        frame_length = int(self.sample_rate * 0.01)
        num_frames = int(len(data) / (2 * frame_length))
        speech_frames = 0

        for i in range(num_frames):
            start_byte = i * frame_length * 2
            end_byte = start_byte + frame_length * 2
            frame = data[start_byte:end_byte]
            if self.webrtc_vad_model.is_speech(frame, self.sample_rate):
                speech_frames += 1
                if not all_frames_must_be_true:
                    return True
        if all_frames_must_be_true:
            return speech_frames == num_frames
        else:
            return False

    def _check_voice_activity(self, data):
        """
        Initiate check if voice is active based on the provided data.

        Args:
            data: The audio data to be checked for voice activity.
        """
        # First quick performing check for voice activity using WebRTC
        self.is_webrtc_speech_active = self._is_webrtc_speech(data)

        if self.is_webrtc_speech_active:

            if not self.silero_working:
                self.silero_working = True

                # Run the intensive check in a separate thread
                threading.Thread(
                    target=self._is_silero_speech,
                    args=(data,)).start()

    def _is_voice_active(self):
        """
        Determine if voice is active.

        Returns:
            bool: True if voice is active, False otherwise.
        """
        return self.is_webrtc_speech_active and self.is_silero_speech_active
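
    # Design note: WebRTC VAD is cheap enough to run on every chunk, while
    # Silero is heavier but more robust. _check_voice_activity therefore uses
    # WebRTC as a fast gate and only confirms with Silero (in a background
    # thread), and _is_voice_active requires both detectors to agree before
    # recording starts.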

    def _set_state(self, new_state):
        """
        Update the current state of the recorder and execute
        corresponding state-change callbacks.

        Args:
            new_state (str): The new state to set.
        """
        # Check if the state has actually changed
        if new_state == self.state:
            return

        # Store the current state for later comparison
        old_state = self.state

        # Update to the new state
        self.state = new_state

        # Execute callbacks based on transitioning FROM a particular state
        if old_state == "listening":
            if self.on_vad_detect_stop:
                self.on_vad_detect_stop()
        elif old_state == "wakeword":
            if self.on_wakeword_detection_end:
                self.on_wakeword_detection_end()

        # Execute callbacks based on transitioning TO a particular state
        if new_state == "listening":
            if self.on_vad_detect_start:
                self.on_vad_detect_start()
            self._set_spinner("speak now")
            if self.spinner and self.halo:
                self.halo._interval = 250
        elif new_state == "wakeword":
            if self.on_wakeword_detection_start:
                self.on_wakeword_detection_start()
            self._set_spinner(f"say {self.wake_words}")
            if self.spinner and self.halo:
                self.halo._interval = 500
        elif new_state == "transcribing":
            if self.on_transcription_start:
                self.on_transcription_start()
            self._set_spinner("transcribing")
            if self.spinner and self.halo:
                self.halo._interval = 50
        elif new_state == "recording":
            self._set_spinner("recording")
            if self.spinner and self.halo:
                self.halo._interval = 100
        elif new_state == "inactive":
            if self.spinner and self.halo:
                self.halo.stop()
                self.halo = None

    def _set_spinner(self, text):
        """
        Update the spinner's text or create a new
        spinner with the provided text.

        Args:
            text (str): The text to be displayed alongside the spinner.
        """
        if self.spinner:
            # If the Halo spinner doesn't exist, create and start it
            if self.halo is None:
                self.halo = halo.Halo(text=text)
                self.halo.start()
            # If the Halo spinner already exists, just update the text
            else:
                self.halo.text = text

    def _preprocess_output(self, text, preview=False):
        """
        Preprocesses the output text by removing any leading or trailing
        whitespace, converting all whitespace sequences to a single space
        character, and capitalizing the first character of the text.

        Args:
            text (str): The text to be preprocessed.

        Returns:
            str: The preprocessed text.
        """
        text = re.sub(r'\s+', ' ', text.strip())

        if self.ensure_sentence_starting_uppercase:
            if text:
                text = text[0].upper() + text[1:]

        # Ensure the text ends with proper punctuation
        # if it ends with an alphanumeric character
        if not preview:
            if self.ensure_sentence_ends_with_period:
                if text and text[-1].isalnum():
                    text += '.'

        return text
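
    # Worked example of _preprocess_output (illustrative inputs only):
    #
    #     _preprocess_output("  hello   world  ")        -> "Hello world."
    #     _preprocess_output("  hello   world  ", True)  -> "Hello world"
    #
    # Previews skip the trailing period so that partial sentences from the
    # realtime path are not punctuated prematurely.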

    def _find_tail_match_in_text(self, text1, text2, length_of_match=10):
        """
        Find the position where the last 'n' characters of text1
        match with a substring in text2.

        This method takes two texts, extracts the last 'n' characters from
        text1 (where 'n' is determined by the variable 'length_of_match'), and
        searches for an occurrence of this substring in text2, starting from
        the end of text2 and moving towards the beginning.

        Parameters:
        - text1 (str): The text containing the substring that we want to find
          in text2.
        - text2 (str): The text in which we want to find the matching
          substring.
        - length_of_match (int): The length of the matching string that we are
          looking for.

        Returns:
            int: The position (0-based index) in text2 immediately after the
            end of the matching substring, i.e. where the fresh (unmatched)
            part of text2 begins. If no match is found or either of the
            texts is too short, returns -1.
        """
        # Check if either of the texts is too short
        if len(text1) < length_of_match or len(text2) < length_of_match:
            return -1

        # The end portion of the first text that we want to compare
        target_substring = text1[-length_of_match:]

        # Loop through text2 from right to left
        for i in range(len(text2) - length_of_match + 1):
            # Extract the substring from text2
            # to compare with the target_substring
            current_substring = text2[len(text2) - i - length_of_match:
                                      len(text2) - i]

            # Compare the current_substring with the target_substring
            if current_substring == target_substring:
                # Position in text2 right after the match ends
                return len(text2) - i

        return -1
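
    # Worked example (illustrative strings only): with length_of_match=10,
    # text1 ending in "abcdefghij" has that 10-char tail. Searching
    # text2 = "xxabcdefghijyy" from the right finds the tail at indices
    # 2..11, so the method returns 12 -- the index right after the match,
    # i.e. where the fresh text ("yy") begins. _realtime_worker appends
    # exactly that fresh part to the stabilized text.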

    def _on_realtime_transcription_stabilized(self, text):
        """
        Callback method invoked when the real-time transcription stabilizes.

        This method is called internally when the transcription text is
        considered "stable", meaning it's less likely to change significantly
        with additional audio input. It notifies any registered external
        listener about the stabilized text if recording is still ongoing.
        This is particularly useful for applications that need to display
        live transcription results to users and want to highlight parts of the
        transcription that are less likely to change.

        Args:
            text (str): The stabilized transcription text.
        """
        if self.on_realtime_transcription_stabilized:
            if self.is_recording:
                self.on_realtime_transcription_stabilized(text)

    def _on_realtime_transcription_update(self, text):
        """
        Callback method invoked when there's an update in the real-time
        transcription.

        This method is called internally whenever there's a change in the
        transcription text, notifying any registered external listener about
        the update if recording is still ongoing. This provides a mechanism
        for applications to receive and possibly display live transcription
        updates, which could be partial and still subject to change.

        Args:
            text (str): The updated transcription text.
        """
        if self.on_realtime_transcription_update:
            if self.is_recording:
                self.on_realtime_transcription_update(text)

    def __enter__(self):
        """
        Method to setup the context manager protocol.

        This enables the instance to be used in a `with` statement, ensuring
        proper resource management. When the `with` block is entered, this
        method is automatically called.

        Returns:
            self: The current instance of the class.
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Method to define behavior when the context manager protocol exits.

        This is called when exiting the `with` block and ensures that any
        necessary cleanup or resource release processes are executed, such as
        shutting down the system properly.

        Args:
            exc_type (Exception or None): The type of the exception that
                caused the context to be exited, if any.
            exc_value (Exception or None): The exception instance that caused
                the context to be exited, if any.
            traceback (Traceback or None): The traceback corresponding to the
                exception, if any.
        """
        self.shutdown()
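

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original class): the context
    # manager guarantees shutdown() runs, and text() blocks until one
    # utterance has been recorded and transcribed.
    with AudioToTextRecorder(spinner=False) as recorder:
        print("Speak now...")
        print(recorder.text())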