  1. """
  2. The AudioToTextRecorder class in the provided code facilitates fast speech-to-text transcription.
  3. The class employs the faster_whisper library to transcribe the recorded audio
  4. into text using machine learning models, which can be run either on a GPU or CPU.
  5. Voice activity detection (VAD) is built in, meaning the software can automatically
  6. start or stop recording based on the presence or absence of speech.
  7. It integrates wake word detection through the pvporcupine library, allowing the
  8. software to initiate recording when a specific word or phrase is spoken.
  9. The system provides real-time feedback and can be further customized.
  10. Features:
  11. - Voice Activity Detection: Automatically starts/stops recording when speech is detected or when speech ends.
  12. - Wake Word Detection: Starts recording when a specified wake word (or words) is detected.
  13. - Event Callbacks: Customizable callbacks for when recording starts or finishes.
  14. - Fast Transcription: Returns the transcribed text from the audio as fast as possible.
  15. Author: Kolja Beigel
  16. """
from multiprocessing import Process, Pipe, Queue, Event
import faster_whisper
import collections
import numpy as np
import pvporcupine
import traceback
import threading
import webrtcvad
import itertools
import pyaudio
import logging
import struct
import torch
import halo
import time
import os
import re

INIT_MODEL_TRANSCRIPTION = "tiny"
INIT_MODEL_TRANSCRIPTION_REALTIME = "tiny"
INIT_REALTIME_PROCESSING_PAUSE = 0.2
INIT_SILERO_SENSITIVITY = 0.4
INIT_WEBRTC_SENSITIVITY = 3
INIT_POST_SPEECH_SILENCE_DURATION = 0.6
INIT_MIN_LENGTH_OF_RECORDING = 0.5
INIT_MIN_GAP_BETWEEN_RECORDINGS = 0
INIT_WAKE_WORDS_SENSITIVITY = 0.6
INIT_PRE_RECORDING_BUFFER_DURATION = 1.0
INIT_WAKE_WORD_ACTIVATION_DELAY = 0.0
INIT_WAKE_WORD_TIMEOUT = 5.0
ALLOWED_LATENCY_LIMIT = 10

TIME_SLEEP = 0.02
SAMPLE_RATE = 16000
BUFFER_SIZE = 512
INT16_MAX_ABS_VALUE = 32768.0
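
# Note on the chunking math: with SAMPLE_RATE = 16000 and BUFFER_SIZE = 512,
# each audio chunk covers 512 / 16000 = 32 ms, so a pre-recording buffer of
# INIT_PRE_RECORDING_BUFFER_DURATION = 1.0 s holds roughly 16000 // 512 = 31
# chunks (see the deque maxlen computed in __init__ below).
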

class AudioToTextRecorder:
    """
    A class responsible for capturing audio from the microphone, detecting
    voice activity, and then transcribing the captured audio using the
    `faster_whisper` model.
    """

    def __init__(self,
                 model: str = INIT_MODEL_TRANSCRIPTION,
                 language: str = "",
                 on_recording_start=None,
                 on_recording_stop=None,
                 on_transcription_start=None,
                 ensure_sentence_starting_uppercase=True,
                 ensure_sentence_ends_with_period=True,
                 spinner=True,
                 level=logging.WARNING,

                 # Realtime transcription parameters
                 enable_realtime_transcription=False,
                 realtime_model_type=INIT_MODEL_TRANSCRIPTION_REALTIME,
                 realtime_processing_pause=INIT_REALTIME_PROCESSING_PAUSE,
                 on_realtime_transcription_update=None,
                 on_realtime_transcription_stabilized=None,

                 # Voice activation parameters
                 silero_sensitivity: float = INIT_SILERO_SENSITIVITY,
                 silero_use_onnx: bool = False,
                 webrtc_sensitivity: int = INIT_WEBRTC_SENSITIVITY,
                 post_speech_silence_duration: float = INIT_POST_SPEECH_SILENCE_DURATION,
                 min_length_of_recording: float = INIT_MIN_LENGTH_OF_RECORDING,
                 min_gap_between_recordings: float = INIT_MIN_GAP_BETWEEN_RECORDINGS,
                 pre_recording_buffer_duration: float = INIT_PRE_RECORDING_BUFFER_DURATION,
                 on_vad_detect_start=None,
                 on_vad_detect_stop=None,

                 # Wake word parameters
                 wake_words: str = "",
                 wake_words_sensitivity: float = INIT_WAKE_WORDS_SENSITIVITY,
                 wake_word_activation_delay: float = INIT_WAKE_WORD_ACTIVATION_DELAY,
                 wake_word_timeout: float = INIT_WAKE_WORD_TIMEOUT,
                 on_wakeword_detected=None,
                 on_wakeword_timeout=None,
                 on_wakeword_detection_start=None,
                 on_wakeword_detection_end=None,
                 ):
  92. """
  93. Initializes an audio recorder and transcription and wake word detection.
  94. Args:
  95. - model (str, default="tiny"): Specifies the size of the transcription model to use or the path to a converted model directory.
  96. Valid options are 'tiny', 'tiny.en', 'base', 'base.en', 'small', 'small.en', 'medium', 'medium.en', 'large-v1', 'large-v2'.
  97. If a specific size is provided, the model is downloaded from the Hugging Face Hub.
  98. - language (str, default=""): Language code for speech-to-text engine. If not specified, the model will attempt to detect the language automatically.
  99. - on_recording_start (callable, default=None): Callback function to be called when recording of audio to be transcripted starts.
  100. - on_recording_stop (callable, default=None): Callback function to be called when recording of audio to be transcripted stops.
  101. - on_transcription_start (callable, default=None): Callback function to be called when transcription of audio to text starts.
  102. - ensure_sentence_starting_uppercase (bool, default=True): Ensures that every sentence detected by the algorithm starts with an uppercase letter.
  103. - ensure_sentence_ends_with_period (bool, default=True): Ensures that every sentence that doesn't end with punctuation such as "?", "!" ends with a period
  104. - spinner (bool, default=True): Show spinner animation with current state.
  105. - level (int, default=logging.WARNING): Logging level.
  106. - enable_realtime_transcription (bool, default=False): Enables or disables real-time transcription of audio. When set to True, the audio will be transcribed continuously as it is being recorded.
  107. - realtime_model_type (str, default="tiny"): Specifies the machine learning model to be used for real-time transcription. Valid options include 'tiny', 'tiny.en', 'base', 'base.en', 'small', 'small.en', 'medium', 'medium.en', 'large-v1', 'large-v2'.
  108. - realtime_processing_pause (float, default=0.1): Specifies the time interval in seconds after a chunk of audio gets transcribed. Lower values will result in more "real-time" (frequent) transcription updates but may increase computational load.
  109. - on_realtime_transcription_update = A callback function that is triggered whenever there's an update in the real-time transcription. The function is called with the newly transcribed text as its argument.
  110. - on_realtime_transcription_stabilized = A callback function that is triggered when the transcribed text stabilizes in quality. The stabilized text is generally more accurate but may arrive with a slight delay compared to the regular real-time updates.
  111. - silero_sensitivity (float, default=SILERO_SENSITIVITY): Sensitivity for the Silero Voice Activity Detection model ranging from 0 (least sensitive) to 1 (most sensitive). Default is 0.5.
  112. - silero_use_onnx (bool, default=False): Enables usage of the pre-trained model from Silero in the ONNX (Open Neural Network Exchange) format instead of the PyTorch format. This is recommended for faster performance.
  113. - webrtc_sensitivity (int, default=WEBRTC_SENSITIVITY): Sensitivity for the WebRTC Voice Activity Detection engine ranging from 0 (least aggressive / most sensitive) to 3 (most aggressive, least sensitive). Default is 3.
  114. - post_speech_silence_duration (float, default=0.2): Duration in seconds of silence that must follow speech before the recording is considered to be completed. This ensures that any brief pauses during speech don't prematurely end the recording.
  115. - min_gap_between_recordings (float, default=1.0): Specifies the minimum time interval in seconds that should exist between the end of one recording session and the beginning of another to prevent rapid consecutive recordings.
  116. - min_length_of_recording (float, default=1.0): Specifies the minimum duration in seconds that a recording session should last to ensure meaningful audio capture, preventing excessively short or fragmented recordings.
  117. - pre_recording_buffer_duration (float, default=0.2): Duration in seconds for the audio buffer to maintain pre-roll audio (compensates speech activity detection latency)
  118. - on_vad_detect_start (callable, default=None): Callback function to be called when the system listens for voice activity.
  119. - on_vad_detect_stop (callable, default=None): Callback function to be called when the system stops listening for voice activity.
  120. - wake_words (str, default=""): Comma-separated string of wake words to initiate recording. Supported wake words include:
  121. 'alexa', 'americano', 'blueberry', 'bumblebee', 'computer', 'grapefruits', 'grasshopper', 'hey google', 'hey siri', 'jarvis', 'ok google', 'picovoice', 'porcupine', 'terminator'.
  122. - wake_words_sensitivity (float, default=0.5): Sensitivity for wake word detection, ranging from 0 (least sensitive) to 1 (most sensitive). Default is 0.5.
  123. - wake_word_activation_delay (float, default=0): Duration in seconds after the start of monitoring before the system switches to wake word activation if no voice is initially detected. If set to zero, the system uses wake word activation immediately.
  124. - wake_word_timeout (float, default=5): Duration in seconds after a wake word is recognized. If no subsequent voice activity is detected within this window, the system transitions back to an inactive state, awaiting the next wake word or voice activation.
  125. - on_wakeword_detected (callable, default=None): Callback function to be called when a wake word is detected.
  126. - on_wakeword_timeout (callable, default=None): Callback function to be called when the system goes back to an inactive state after when no speech was detected after wake word activation
  127. - on_wakeword_detection_start (callable, default=None): Callback function to be called when the system starts to listen for wake words
  128. - on_wakeword_detection_end (callable, default=None): Callback function to be called when the system stops to listen for wake words (e.g. because of timeout or wake word detected)
  129. Raises:
  130. Exception: Errors related to initializing transcription model, wake word detection, or audio recording.
  131. """
        self.language = language
        self.wake_words = wake_words
        self.wake_word_activation_delay = wake_word_activation_delay
        self.wake_word_timeout = wake_word_timeout
        self.ensure_sentence_starting_uppercase = ensure_sentence_starting_uppercase
        self.ensure_sentence_ends_with_period = ensure_sentence_ends_with_period
        self.min_gap_between_recordings = min_gap_between_recordings
        self.min_length_of_recording = min_length_of_recording
        self.pre_recording_buffer_duration = pre_recording_buffer_duration
        self.post_speech_silence_duration = post_speech_silence_duration
        self.on_recording_start = on_recording_start
        self.on_recording_stop = on_recording_stop
        self.on_wakeword_detected = on_wakeword_detected
        self.on_wakeword_timeout = on_wakeword_timeout
        self.on_vad_detect_start = on_vad_detect_start
        self.on_vad_detect_stop = on_vad_detect_stop
        self.on_wakeword_detection_start = on_wakeword_detection_start
        self.on_wakeword_detection_end = on_wakeword_detection_end
        self.on_transcription_start = on_transcription_start
        self.enable_realtime_transcription = enable_realtime_transcription
        self.realtime_model_type = realtime_model_type
        self.realtime_processing_pause = realtime_processing_pause
        self.on_realtime_transcription_update = on_realtime_transcription_update
        self.on_realtime_transcription_stabilized = on_realtime_transcription_stabilized
        self.allowed_latency_limit = ALLOWED_LATENCY_LIMIT

        self.level = level
        self.audio_queue = Queue()
        self.buffer_size = BUFFER_SIZE
        self.sample_rate = SAMPLE_RATE
        self.recording_start_time = 0
        self.recording_stop_time = 0
        self.wake_word_detect_time = 0
        self.silero_check_time = 0
        self.silero_working = False
        self.speech_end_silence_start = 0
        self.silero_sensitivity = silero_sensitivity
        self.listen_start = 0
        self.spinner = spinner
        self.halo = None
        self.state = "inactive"
        self.wakeword_detected = False
        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.is_webrtc_speech_active = False
        self.is_silero_speech_active = False
        self.recording_thread = None
        self.realtime_thread = None
        self.audio_interface = None
        self.audio = None
        self.stream = None
        self.start_recording_event = threading.Event()
        self.stop_recording_event = threading.Event()

        # Initialize the logging configuration with the specified level
        log_format = 'RealTimeSTT: %(name)s - %(levelname)s - %(message)s'

        # Create a logger
        logger = logging.getLogger()
        logger.setLevel(level)  # Set the root logger's level

        # Create a file handler and set its level
        file_handler = logging.FileHandler('realtimesst.log')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(log_format))

        # Create a console handler and set its level
        console_handler = logging.StreamHandler()
        console_handler.setLevel(level)
        console_handler.setFormatter(logging.Formatter(log_format))

        # Add the handlers to the logger
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)

        self.is_shut_down = False
        self.shutdown_event = Event()

        logging.info("Starting RealTimeSTT")

        # Start transcription process
        self.main_transcription_ready_event = Event()
        self.parent_transcription_pipe, child_transcription_pipe = Pipe()
        self.transcript_process = Process(
            target=AudioToTextRecorder._transcription_worker,
            args=(
                child_transcription_pipe,
                model,
                self.main_transcription_ready_event,
                self.shutdown_event
            )
        )
        self.transcript_process.start()

        # Start audio data reading process
        self.reader_process = Process(
            target=AudioToTextRecorder._audio_data_worker,
            args=(
                self.audio_queue,
                self.sample_rate,
                self.buffer_size,
                self.shutdown_event
            )
        )
        self.reader_process.start()

        # Initialize the realtime transcription model
        if self.enable_realtime_transcription:
            try:
                logging.info(f"Initializing faster_whisper realtime transcription model {self.realtime_model_type}")
                self.realtime_model_type = faster_whisper.WhisperModel(
                    model_size_or_path=self.realtime_model_type,
                    device='cuda' if torch.cuda.is_available() else 'cpu'
                )
            except Exception as e:
                logging.exception(f"Error initializing faster_whisper realtime transcription model: {e}")
                raise

            logging.debug('Faster_whisper realtime speech to text transcription model initialized successfully')

        # Setup wake word detection
        if wake_words:
            self.wake_words_list = [word.strip() for word in wake_words.lower().split(',')]
            sensitivity_list = [float(wake_words_sensitivity) for _ in range(len(self.wake_words_list))]

            try:
                self.porcupine = pvporcupine.create(
                    keywords=self.wake_words_list,
                    sensitivities=sensitivity_list
                )
                self.buffer_size = self.porcupine.frame_length
                self.sample_rate = self.porcupine.sample_rate
            except Exception as e:
                logging.exception(f"Error initializing porcupine wake word detection engine: {e}")
                raise

            logging.debug('Porcupine wake word detection engine initialized successfully')

        # Setup voice activity detection model WebRTC
        try:
            logging.info(f"Initializing WebRTC voice with Sensitivity {webrtc_sensitivity}")
            self.webrtc_vad_model = webrtcvad.Vad()
            self.webrtc_vad_model.set_mode(webrtc_sensitivity)
        except Exception as e:
            logging.exception(f"Error initializing WebRTC voice activity detection engine: {e}")
            raise

        logging.debug('WebRTC VAD voice activity detection engine initialized successfully')

        # Setup voice activity detection model Silero VAD
        try:
            self.silero_vad_model, _ = torch.hub.load(
                repo_or_dir="snakers4/silero-vad",
                model="silero_vad",
                verbose=False,
                onnx=silero_use_onnx
            )
        except Exception as e:
            logging.exception(f"Error initializing Silero VAD voice activity detection engine: {e}")
            raise

        logging.debug('Silero VAD voice activity detection engine initialized successfully')

        self.audio_buffer = collections.deque(
            maxlen=int((self.sample_rate // self.buffer_size) * self.pre_recording_buffer_duration)
        )
        self.frames = []

        # Recording control flags
        self.is_recording = False
        self.is_running = True
        self.start_recording_on_voice_activity = False
        self.stop_recording_on_voice_deactivity = False

        # Start the recording worker thread
        self.recording_thread = threading.Thread(target=self._recording_worker)
        self.recording_thread.daemon = True
        self.recording_thread.start()

        # Start the realtime transcription worker thread
        self.realtime_thread = threading.Thread(target=self._realtime_worker)
        self.realtime_thread.daemon = True
        self.realtime_thread.start()

        # Wait for transcription models to start
        logging.debug('Waiting for main transcription model to start')
        self.main_transcription_ready_event.wait()
        logging.debug('Main transcription model ready')

        logging.debug('RealtimeSTT initialization completed successfully')

    @staticmethod
    def _transcription_worker(conn, model_path, ready_event, shutdown_event):
        """
        Worker method that handles the continuous process of transcribing audio data.

        This method runs in a separate process and is responsible for:
        - Initializing the `faster_whisper` model used for transcription.
        - Receiving audio data sent through a pipe and using the model to transcribe it.
        - Sending transcription results back through the pipe.
        - Continuously checking for a shutdown event to gracefully terminate the transcription process.

        Args:
            conn (multiprocessing.Connection): The connection endpoint used for receiving audio data and sending transcription results.
            model_path (str): The path to the pre-trained faster_whisper model for transcription.
            ready_event (multiprocessing.Event): An event that is set when the transcription model is successfully initialized and ready.
            shutdown_event (multiprocessing.Event): An event that, when set, signals this worker method to terminate.

        Raises:
            Exception: If there is an error while initializing the transcription model.
        """
        logging.info(f"Initializing faster_whisper main transcription model {model_path}")

        try:
            model = faster_whisper.WhisperModel(
                model_size_or_path=model_path,
                device='cuda' if torch.cuda.is_available() else 'cpu'
            )
        except Exception as e:
            logging.exception(f"Error initializing main faster_whisper transcription model: {e}")
            raise

        ready_event.set()

        logging.debug('Faster_whisper main speech to text transcription model initialized successfully')

        while not shutdown_event.is_set():
            if conn.poll(0.5):
                audio, language = conn.recv()
                try:
                    segments = model.transcribe(audio, language=language if language else None)[0]
                    transcription = " ".join(seg.text for seg in segments).strip()
                    conn.send(('success', transcription))
                except faster_whisper.WhisperError as e:
                    logging.error(f"Whisper transcription error: {e}")
                    conn.send(('error', str(e)))
                except Exception as e:
                    logging.error(f"General transcription error: {e}")
                    conn.send(('error', str(e)))
            else:
                # If there's no data, sleep for a short while to prevent busy waiting
                time.sleep(0.02)
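
    # A sketch of the pipe protocol used by _transcription_worker (hypothetical
    # standalone use; in this class the parent side lives in transcribe()):
    #
    #     parent_pipe.send((audio_float32_array, "en"))
    #     status, result = parent_pipe.recv()
    #     if status == 'success':
    #         print(result)   # transcription text
    #     else:
    #         print(result)   # error message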

    @staticmethod
    def _audio_data_worker(audio_queue, sample_rate, buffer_size, shutdown_event):
        """
        Worker method that handles the audio recording process.

        This method runs in a separate process and is responsible for:
        - Setting up the audio input stream for recording.
        - Continuously reading audio data from the input stream and placing it in a queue.
        - Handling errors during the recording process, including input overflow.
        - Gracefully terminating the recording process when a shutdown event is set.

        Args:
            audio_queue (multiprocessing.Queue): A queue where recorded audio data is placed.
            sample_rate (int): The sample rate of the audio input stream.
            buffer_size (int): The size of the buffer used in the audio input stream.
            shutdown_event (multiprocessing.Event): An event that, when set, signals this worker method to terminate.

        Raises:
            Exception: If there is an error while initializing the audio recording.
        """
        logging.info("Initializing audio recording (creating pyAudio input stream)")

        try:
            audio_interface = pyaudio.PyAudio()
            stream = audio_interface.open(
                rate=sample_rate,
                format=pyaudio.paInt16,
                channels=1,
                input=True,
                frames_per_buffer=buffer_size
            )
        except Exception as e:
            logging.exception(f"Error initializing pyaudio audio recording: {e}")
            raise

        logging.debug('Audio recording (pyAudio input stream) initialized successfully')

        try:
            while not shutdown_event.is_set():
                try:
                    data = stream.read(buffer_size)
                except OSError as e:
                    if e.errno == pyaudio.paInputOverflowed:
                        logging.warning("Input overflowed. Frame dropped.")
                    else:
                        logging.error(f"Error during recording: {e}")
                        tb_str = traceback.format_exc()
                        print(f"Traceback: {tb_str}")
                        print(f"Error: {e}")
                    continue
                except Exception as e:
                    logging.error(f"Error during recording: {e}")
                    tb_str = traceback.format_exc()
                    print(f"Traceback: {tb_str}")
                    print(f"Error: {e}")
                    continue

                audio_queue.put(data)
        finally:
            stream.stop_stream()
            stream.close()
            audio_interface.terminate()

    def wait_audio(self):
        """
        Waits for the start and completion of the audio recording process.

        This method is responsible for:
        - Waiting for voice activity to begin recording if not yet started.
        - Waiting for voice inactivity to complete the recording.
        - Setting the audio buffer from the recorded frames.
        - Resetting recording-related attributes.

        Side effects:
        - Updates the state of the instance.
        - Modifies the audio attribute to contain the processed audio data.
        """
        self.listen_start = time.time()

        # If not yet started recording, wait for voice activity to initiate.
        if not self.is_recording and not self.frames:
            self._set_state("listening")
            self.start_recording_on_voice_activity = True

            # Wait until recording starts
            self.start_recording_event.wait()

        # If recording is ongoing, wait for voice inactivity to finish recording.
        if self.is_recording:
            self.stop_recording_on_voice_deactivity = True

            # Wait until recording stops
            self.stop_recording_event.wait()

        # Convert recorded frames to the appropriate audio format.
        audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)
        self.audio = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE
        self.frames.clear()

        # Reset recording-related timestamps
        self.recording_stop_time = 0
        self.listen_start = 0

        self._set_state("inactive")

    def transcribe(self):
        """
        Transcribes audio captured by this class instance using the `faster_whisper` model.

        Sends the recorded audio to the transcription process over the pipe and
        blocks until a result (or an error) is received.

        Returns:
            str: The transcription of the recorded audio.

        Raises:
            Exception: If there is an error during the transcription process.
        """
        self._set_state("transcribing")
        self.parent_transcription_pipe.send((self.audio, self.language))
        status, result = self.parent_transcription_pipe.recv()

        self._set_state("inactive")

        if status == 'success':
            return self._preprocess_output(result)
        else:
            logging.error(result)
            raise Exception(result)

    def text(self, on_transcription_finished=None):
        """
        Transcribes audio captured by this class instance using the `faster_whisper` model.

        - Automatically starts recording upon voice activity if not manually started using `recorder.start()`.
        - Automatically stops recording upon voice deactivity if not manually stopped with `recorder.stop()`.
        - Processes the recorded audio to generate transcription.

        Args:
            on_transcription_finished (callable, optional): Callback function to be executed when transcription is ready.
                If provided, transcription will be performed asynchronously, and the callback will receive the transcription
                as its argument. If omitted, the transcription will be performed synchronously, and the result will be returned.

        Returns (if no callback is set):
            str: The transcription of the recorded audio.
        """
        self.wait_audio()

        if self.is_shut_down:
            return ""

        if on_transcription_finished:
            # Note: transcribe() itself still runs synchronously here; only the
            # callback is dispatched to a separate thread.
            threading.Thread(target=on_transcription_finished, args=(self.transcribe(),)).start()
        else:
            return self.transcribe()

    def start(self):
        """
        Starts recording audio directly without waiting for voice activity.
        """
        # Ensure there's a minimum interval between stopping and starting recording
        if time.time() - self.recording_stop_time < self.min_gap_between_recordings:
            logging.info("Attempted to start recording too soon after stopping.")
            return self

        logging.info("recording started")
        self._set_state("recording")
        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.wakeword_detected = False
        self.wake_word_detect_time = 0
        self.frames = []
        self.is_recording = True
        self.recording_start_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False
        self.stop_recording_event.clear()
        self.start_recording_event.set()

        if self.on_recording_start:
            self.on_recording_start()

        return self

    def stop(self):
        """
        Stops recording audio.
        """
        # Ensure there's a minimum interval between starting and stopping recording
        if time.time() - self.recording_start_time < self.min_length_of_recording:
            logging.info("Attempted to stop recording too soon after starting.")
            return self

        logging.info("recording stopped")
        self.is_recording = False
        self.recording_stop_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False
        self.silero_check_time = 0
        self.start_recording_event.clear()
        self.stop_recording_event.set()

        if self.on_recording_stop:
            self.on_recording_stop()

        return self

    def shutdown(self):
        """
        Safely shuts down the audio recording by stopping the recording worker and closing the audio stream.
        """
        # Force wait_audio() and text() to exit
        self.is_shut_down = True
        self.start_recording_event.set()
        self.stop_recording_event.set()

        self.shutdown_event.set()
        self.is_recording = False
        self.is_running = False

        logging.debug('Finishing recording thread')
        if self.recording_thread:
            self.recording_thread.join()

        logging.debug('Terminating reader process')

        # Give it some time to finish the loop and cleanup.
        self.reader_process.join(timeout=10)

        if self.reader_process.is_alive():
            logging.warning("Reader process did not terminate in time. Terminating forcefully.")
            self.reader_process.terminate()

        logging.debug('Terminating transcription process')
        self.transcript_process.join(timeout=10)

        if self.transcript_process.is_alive():
            logging.warning("Transcript process did not terminate in time. Terminating forcefully.")
            self.transcript_process.terminate()

        self.parent_transcription_pipe.close()

        logging.debug('Finishing realtime thread')
        if self.realtime_thread:
            self.realtime_thread.join()

    def _recording_worker(self):
        """
        The main worker method which constantly monitors the audio input for voice activity and accordingly starts/stops the recording.
        """
        logging.debug('Starting recording worker')

        try:
            was_recording = False
            delay_was_passed = False

            # Continuously monitor audio for voice activity
            while self.is_running:
                data = self.audio_queue.get()

                # Handle queue overflow
                queue_overflow_logged = False
                while self.audio_queue.qsize() > self.allowed_latency_limit:
                    if not queue_overflow_logged:
                        logging.warning(f"Audio queue size exceeds latency limit. Current size: {self.audio_queue.qsize()}. Discarding old audio chunks.")
                        queue_overflow_logged = True
                    data = self.audio_queue.get()

                if not self.is_recording:
                    # Handle not-recording state
                    time_since_listen_start = time.time() - self.listen_start if self.listen_start else 0
                    wake_word_activation_delay_passed = (time_since_listen_start > self.wake_word_activation_delay)

                    # Handle wake-word timeout callback
                    if wake_word_activation_delay_passed and not delay_was_passed:
                        if self.wake_words and self.wake_word_activation_delay:
                            if self.on_wakeword_timeout:
                                self.on_wakeword_timeout()
                    delay_was_passed = wake_word_activation_delay_passed

                    # Set state and spinner text
                    if not self.recording_stop_time:
                        if self.wake_words and wake_word_activation_delay_passed and not self.wakeword_detected:
                            self._set_state("wakeword")
                        else:
                            if self.listen_start:
                                self._set_state("listening")
                            else:
                                self._set_state("inactive")

                    # Detect wake words if applicable
                    if self.wake_words and wake_word_activation_delay_passed:
                        try:
                            pcm = struct.unpack_from("h" * self.buffer_size, data)
                            wakeword_index = self.porcupine.process(pcm)
                        except struct.error:
                            logging.error("Error unpacking audio data for wake word processing.")
                            continue
                        except Exception as e:
                            logging.error(f"Wake word processing error: {e}")
                            continue

                        # If a wake word is detected
                        if wakeword_index >= 0:
                            # Remove the wake word from the recording by keeping
                            # only the tail of the pre-recording buffer
                            samples_for_0_1_sec = int(self.sample_rate * 0.1)
                            start_index = max(0, len(self.audio_buffer) - samples_for_0_1_sec)
                            temp_samples = collections.deque(itertools.islice(self.audio_buffer, start_index, None))
                            self.audio_buffer.clear()
                            self.audio_buffer.extend(temp_samples)

                            self.wake_word_detect_time = time.time()
                            self.wakeword_detected = True
                            if self.on_wakeword_detected:
                                self.on_wakeword_detected()

                    # Check for voice activity to trigger the start of recording
                    if ((not self.wake_words or not wake_word_activation_delay_passed) and self.start_recording_on_voice_activity) or self.wakeword_detected:
                        if self._is_voice_active():
                            logging.info("voice activity detected")
                            self.start()

                            if self.is_recording:
                                self.start_recording_on_voice_activity = False

                                # Add the buffered audio to the recording frames
                                self.frames.extend(list(self.audio_buffer))
                                self.audio_buffer.clear()

                            self.silero_vad_model.reset_states()
                        else:
                            data_copy = data[:]
                            self._check_voice_activity(data_copy)

                    self.speech_end_silence_start = 0
                else:
                    # If we are currently recording

                    # Stop the recording if silence is detected after speech
                    if self.stop_recording_on_voice_deactivity:
                        if not self._is_webrtc_speech(data, True):
                            # Voice deactivity was detected, so we start measuring silence time before stopping recording
                            if self.speech_end_silence_start == 0:
                                self.speech_end_silence_start = time.time()
                        else:
                            self.speech_end_silence_start = 0

                        # Wait for silence to stop recording after speech
                        if self.speech_end_silence_start and time.time() - self.speech_end_silence_start > self.post_speech_silence_duration:
                            logging.info("voice deactivity detected")
                            self.stop()

                if not self.is_recording and was_recording:
                    # Reset after stopping recording to ensure clean state
                    self.stop_recording_on_voice_deactivity = False

                if time.time() - self.silero_check_time > 0.1:
                    self.silero_check_time = 0

                # Handle wake word timeout (waited too long to initiate speech after wake word detection)
                if self.wake_word_detect_time and time.time() - self.wake_word_detect_time > self.wake_word_timeout:
                    self.wake_word_detect_time = 0
                    if self.wakeword_detected and self.on_wakeword_timeout:
                        self.on_wakeword_timeout()
                    self.wakeword_detected = False

                was_recording = self.is_recording

                if self.is_recording:
                    self.frames.append(data)

                if not self.is_recording or self.speech_end_silence_start:
                    self.audio_buffer.append(data)

        except Exception as e:
            logging.error(f"Unhandled exception in _recording_worker: {e}")
            raise

    def _realtime_worker(self):
        """
        Performs real-time transcription if the feature is enabled.

        The method is responsible for transcribing recorded audio frames in real-time
        based on the specified resolution interval.
        The transcribed text is stored in `self.realtime_transcription_text` and a callback
        function is invoked with this text if specified.
        """
        try:
            logging.debug('Starting realtime worker')

            # Return immediately if real-time transcription is not enabled
            if not self.enable_realtime_transcription:
                return

            # Continue running as long as the main process is active
            while self.is_running:
                # Check if the recording is active
                if self.is_recording:
                    # Sleep for the duration of the transcription resolution
                    time.sleep(self.realtime_processing_pause)

                    # Convert the buffer frames to a NumPy array
                    audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)

                    # Normalize the array to a [-1, 1] range
                    audio_array = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE

                    # Perform transcription and assemble the text
                    segments = self.realtime_model_type.transcribe(
                        audio_array,
                        language=self.language if self.language else None
                    )

                    # double check recording state because it could have changed mid-transcription
                    if self.is_recording and time.time() - self.recording_start_time > 0.5:
                        logging.debug('Starting realtime transcription')
                        self.realtime_transcription_text = " ".join(seg.text for seg in segments[0]).strip()

                        self.text_storage.append(self.realtime_transcription_text)

                        # Take the last two texts in storage, if they exist
                        if len(self.text_storage) >= 2:
                            last_two_texts = self.text_storage[-2:]

                            # Find the longest common prefix between the two texts
                            prefix = os.path.commonprefix([last_two_texts[0], last_two_texts[1]])

                            # This prefix is the text that was transcribed two times in the same way
                            # Store as "safely detected text"
                            if len(prefix) >= len(self.realtime_stabilized_safetext):
                                # Only store when longer than the previous as additional security
                                self.realtime_stabilized_safetext = prefix

                        # Find parts of the stabilized text in the freshly transcribed text
                        matching_position = self._find_tail_match_in_text(self.realtime_stabilized_safetext, self.realtime_transcription_text)
                        if matching_position < 0:
                            if self.realtime_stabilized_safetext:
                                self._on_realtime_transcription_stabilized(self._preprocess_output(self.realtime_stabilized_safetext, True))
                            else:
                                self._on_realtime_transcription_stabilized(self._preprocess_output(self.realtime_transcription_text, True))
                        else:
                            # We found parts of the stabilized text in the transcribed text
                            # We now take the stabilized text and add only the freshly transcribed part to it
                            output_text = self.realtime_stabilized_safetext + self.realtime_transcription_text[matching_position:]

                            # This yields us the "left" text part as stabilized AND at the same time delivers fresh detected parts
                            # on the first run without the need for two transcriptions
                            self._on_realtime_transcription_stabilized(self._preprocess_output(output_text, True))

                        # Invoke the callback with the transcribed text
                        self._on_realtime_transcription_update(self._preprocess_output(self.realtime_transcription_text, True))

                # If not recording, sleep briefly before checking again
                else:
                    time.sleep(TIME_SLEEP)

        except Exception as e:
            logging.error(f"Unhandled exception in _realtime_worker: {e}")
            raise

    def _is_silero_speech(self, data):
        """
        Returns true if speech is detected in the provided audio data

        Args:
            data (bytes): raw bytes of audio data (1024 raw bytes with 16000 sample rate and 16 bits per sample)
        """
        self.silero_working = True
        audio_chunk = np.frombuffer(data, dtype=np.int16)
        audio_chunk = audio_chunk.astype(np.float32) / INT16_MAX_ABS_VALUE  # Convert to float and normalize
        vad_prob = self.silero_vad_model(torch.from_numpy(audio_chunk), SAMPLE_RATE).item()
        is_silero_speech_active = vad_prob > (1 - self.silero_sensitivity)
        if is_silero_speech_active:
            self.is_silero_speech_active = True
        self.silero_working = False
        return is_silero_speech_active

    def _is_webrtc_speech(self, data, all_frames_must_be_true=False):
        """
        Returns true if speech is detected in the provided audio data

        Args:
            data (bytes): raw bytes of audio data (1024 raw bytes with 16000 sample rate and 16 bits per sample)
        """
        # Number of samples in a 10 ms frame (WebRTC VAD expects 10/20/30 ms frames)
        frame_length = int(self.sample_rate * 0.01)
        num_frames = int(len(data) / (2 * frame_length))

        speech_frames = 0
        for i in range(num_frames):
            start_byte = i * frame_length * 2
            end_byte = start_byte + frame_length * 2
            frame = data[start_byte:end_byte]
            if self.webrtc_vad_model.is_speech(frame, self.sample_rate):
                speech_frames += 1
                if not all_frames_must_be_true:
                    return True
        if all_frames_must_be_true:
            return speech_frames == num_frames
        else:
            return False

    def _check_voice_activity(self, data):
        """
        Initiate check if voice is active based on the provided data.

        Args:
            data: The audio data to be checked for voice activity.
        """
        # First quick performing check for voice activity using WebRTC
        self.is_webrtc_speech_active = self._is_webrtc_speech(data)

        if self.is_webrtc_speech_active:
            if not self.silero_working:
                self.silero_working = True

                # Run the intensive check in a separate thread
                threading.Thread(target=self._is_silero_speech, args=(data,)).start()

    def _is_voice_active(self):
        """
        Determine if voice is active.

        Returns:
            bool: True if voice is active, False otherwise.
        """
        return self.is_webrtc_speech_active and self.is_silero_speech_active
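
    # Design note on the two-stage VAD above: WebRTC VAD is cheap enough to run
    # on every audio chunk, while Silero is more accurate but heavier, so it is
    # only consulted (on a separate thread) once WebRTC already reports speech.
    # Recording then starts only when _is_voice_active() sees both flags set.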

    def _set_state(self, new_state):
        """
        Update the current state of the recorder and execute corresponding state-change callbacks.

        Args:
            new_state (str): The new state to set.
        """
        # Check if the state has actually changed
        if new_state == self.state:
            return

        # Store the current state for later comparison
        old_state = self.state

        # Update to the new state
        self.state = new_state

        # Execute callbacks based on transitioning FROM a particular state
        if old_state == "listening":
            if self.on_vad_detect_stop:
                self.on_vad_detect_stop()
        elif old_state == "wakeword":
            if self.on_wakeword_detection_end:
                self.on_wakeword_detection_end()

        # Execute callbacks based on transitioning TO a particular state
        if new_state == "listening":
            if self.on_vad_detect_start:
                self.on_vad_detect_start()
            self._set_spinner("speak now")
            if self.spinner:
                self.halo._interval = 250
        elif new_state == "wakeword":
            if self.on_wakeword_detection_start:
                self.on_wakeword_detection_start()
            self._set_spinner(f"say {self.wake_words}")
            if self.spinner:
                self.halo._interval = 500
        elif new_state == "transcribing":
            if self.on_transcription_start:
                self.on_transcription_start()
            self._set_spinner("transcribing")
            if self.spinner:
                self.halo._interval = 50
        elif new_state == "recording":
            self._set_spinner("recording")
            if self.spinner:
                self.halo._interval = 100
        elif new_state == "inactive":
            if self.spinner and self.halo:
                self.halo.stop()
                self.halo = None

    def _set_spinner(self, text):
        """
        Update the spinner's text or create a new spinner with the provided text.

        Args:
            text (str): The text to be displayed alongside the spinner.
        """
        if self.spinner:
            # If the Halo spinner doesn't exist, create and start it
            if self.halo is None:
                self.halo = halo.Halo(text=text)
                self.halo.start()
            # If the Halo spinner already exists, just update the text
            else:
                self.halo.text = text

    def _preprocess_output(self, text, preview=False):
        """
        Preprocesses the output text by removing any leading or trailing whitespace,
        converting all whitespace sequences to a single space character, and capitalizing
        the first character of the text.

        Args:
            text (str): The text to be preprocessed.

        Returns:
            str: The preprocessed text.
        """
        text = re.sub(r'\s+', ' ', text.strip())

        if self.ensure_sentence_starting_uppercase:
            if text:
                text = text[0].upper() + text[1:]

        # Ensure the text ends with a proper punctuation if it ends with an alphanumeric character
        if not preview:
            if self.ensure_sentence_ends_with_period:
                if text and text[-1].isalnum():
                    text += '.'

        return text

    def _find_tail_match_in_text(self, text1, text2, length_of_match=10):
        """
        Find the position where the last 'n' characters of text1 match with a substring in text2.

        This method takes two texts, extracts the last 'n' characters from text1 (where 'n' is determined
        by the variable 'length_of_match'), and searches for an occurrence of this substring in text2,
        starting from the end of text2 and moving towards the beginning.

        Parameters:
        - text1 (str): The text containing the substring that we want to find in text2.
        - text2 (str): The text in which we want to find the matching substring.
        - length_of_match (int): The length of the matching string that we are looking for.

        Returns:
            int: The position (0-based index) in text2 immediately after the matching substring ends
            (usable as the start of the fresh tail of text2).
            If no match is found or either of the texts is too short, returns -1.
        """
        # Check if either of the texts is too short
        if len(text1) < length_of_match or len(text2) < length_of_match:
            return -1

        # The end portion of the first text that we want to compare
        target_substring = text1[-length_of_match:]

        # Loop through text2 from right to left
        for i in range(len(text2) - length_of_match + 1):
            # Extract the substring from text2 to compare with the target_substring
            current_substring = text2[len(text2) - i - length_of_match:len(text2) - i]

            # Compare the current_substring with the target_substring
            if current_substring == target_substring:
                return len(text2) - i  # Position in text2 immediately after the match ends

        return -1

    def _on_realtime_transcription_stabilized(self, text):
        """
        Callback method invoked when the real-time transcription stabilizes.

        This method is called internally when the transcription text is considered "stable"
        meaning it's less likely to change significantly with additional audio input. It
        notifies any registered external listener about the stabilized text if recording is
        still ongoing. This is particularly useful for applications that need to display
        live transcription results to users and want to highlight parts of the transcription
        that are less likely to change.

        Args:
            text (str): The stabilized transcription text.
        """
        if self.on_realtime_transcription_stabilized:
            if self.is_recording:
                self.on_realtime_transcription_stabilized(text)

    def _on_realtime_transcription_update(self, text):
        """
        Callback method invoked when there's an update in the real-time transcription.

        This method is called internally whenever there's a change in the transcription text,
        notifying any registered external listener about the update if recording is still
        ongoing. This provides a mechanism for applications to receive and possibly display
        live transcription updates, which could be partial and still subject to change.

        Args:
            text (str): The updated transcription text.
        """
        if self.on_realtime_transcription_update:
            if self.is_recording:
                self.on_realtime_transcription_update(text)

    def __enter__(self):
        """
        Method to set up the context manager protocol.

        This enables the instance to be used in a `with` statement, ensuring proper
        resource management. When the `with` block is entered, this method is
        automatically called.

        Returns:
            self: The current instance of the class.
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Method to define behavior when the context manager protocol exits.

        This is called when exiting the `with` block and ensures that any necessary
        cleanup or resource release processes are executed, such as shutting down
        the system properly.

        Args:
            exc_type (Exception or None): The type of the exception that caused the context to be exited, if any.
            exc_value (Exception or None): The exception instance that caused the context to be exited, if any.
            traceback (Traceback or None): The traceback corresponding to the exception, if any.
        """
        self.shutdown()
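

# A further usage sketch (illustrative; assumes a working microphone and the
# pvporcupine keyword 'jarvis'): wake word activation with a callback, using
# the context manager so shutdown() runs automatically on exit.
#
# if __name__ == '__main__':
#     def on_detected():
#         print("wake word heard, speak now")
#
#     with AudioToTextRecorder(wake_words="jarvis",
#                              on_wakeword_detected=on_detected) as recorder:
#         while True:
#             print(recorder.text())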