  1. """
  2. The AudioToTextRecorder class in the provided code facilitates fast speech-to-text transcription.
  3. The class employs the faster_whisper library to transcribe the recorded audio
  4. into text using machine learning models, which can be run either on a GPU or CPU.
  5. Voice activity detection (VAD) is built in, meaning the software can automatically
  6. start or stop recording based on the presence or absence of speech.
  7. It integrates wake word detection through the pvporcupine library, allowing the
  8. software to initiate recording when a specific word or phrase is spoken.
  9. The system provides real-time feedback and can be further customized.
  10. Features:
  11. - Voice Activity Detection: Automatically starts/stops recording when speech is detected or when speech ends.
  12. - Wake Word Detection: Starts recording when a specified wake word (or words) is detected.
  13. - Event Callbacks: Customizable callbacks for when recording starts or finishes.
  14. - Fast Transcription: Returns the transcribed text from the audio as fast as possible.
  15. Author: Kolja Beigel
  16. """

import pyaudio
import collections
import faster_whisper
import torch
import numpy as np
import struct
import pvporcupine
import threading
import time
import logging
import webrtcvad
import itertools
from collections import deque
from halo import Halo

SAMPLE_RATE = 16000           # Hz; the rate expected by both WebRTC VAD and Silero VAD
BUFFER_SIZE = 512             # samples per chunk read from the microphone
SILERO_SENSITIVITY = 0.4      # default Silero VAD sensitivity (0..1)
WEBRTC_SENSITIVITY = 3        # default WebRTC VAD aggressiveness mode (0..3)
WAKE_WORDS_SENSITIVITY = 0.6  # default Porcupine wake word sensitivity (0..1)
TIME_SLEEP = 0.02             # polling interval in seconds for the wait loops


class AudioToTextRecorder:
    """
    A class responsible for capturing audio from the microphone, detecting voice activity,
    and then transcribing the captured audio using the `faster_whisper` model.
    """

    def __init__(self,
                 model: str = "tiny",
                 language: str = "",
                 on_recording_start=None,
                 on_recording_stop=None,
                 on_transcription_start=None,
                 spinner=True,
                 level=logging.WARNING,

                 # Realtime transcription parameters
                 realtime_preview=False,
                 realtime_preview_model="tiny",
                 realtime_preview_resolution=0.1,
                 on_realtime_preview=None,

                 # Voice activation parameters
                 silero_sensitivity: float = SILERO_SENSITIVITY,
                 webrtc_sensitivity: int = WEBRTC_SENSITIVITY,
                 post_speech_silence_duration: float = 0.2,
                 min_length_of_recording: float = 1.0,
                 min_gap_between_recordings: float = 1.0,
                 pre_recording_buffer_duration: float = 1.0,
                 on_vad_detect_start=None,
                 on_vad_detect_stop=None,

                 # Wake word parameters
                 wake_words: str = "",
                 wake_words_sensitivity: float = WAKE_WORDS_SENSITIVITY,
                 wake_word_activation_delay: float = 0,
                 wake_word_timeout: float = 5.0,
                 on_wakeword_detected=None,
                 on_wakeword_timeout=None,
                 on_wakeword_detection_start=None,
                 on_wakeword_detection_end=None,
                 ):
  73. """
  74. Initializes an audio recorder and transcription and wake word detection.
  75. Args:
  76. - model (str, default="tiny"): Specifies the size of the transcription model to use or the path to a converted model directory.
  77. Valid options are 'tiny', 'tiny.en', 'base', 'base.en', 'small', 'small.en', 'medium', 'medium.en', 'large-v1', 'large-v2'.
  78. If a specific size is provided, the model is downloaded from the Hugging Face Hub.
  79. - language (str, default=""): Language code for speech-to-text engine. If not specified, the model will attempt to detect the language automatically.
  80. - on_recording_start (callable, default=None): Callback function to be called when recording of audio to be transcripted starts.
  81. - on_recording_stop (callable, default=None): Callback function to be called when recording of audio to be transcripted stops.
  82. - on_transcription_start (callable, default=None): Callback function to be called when transcription of audio to text starts.
  83. - spinner (bool, default=True): Show spinner animation with current state.
  84. - level (int, default=logging.WARNING): Logging level.
  85. - realtime_preview (bool, default=False): Specifies whether a preview of the transcription should occur in real-time. If set to True, the audio will be transcribed as it is recorded.
  86. - realtime_preview_model (str, default="tiny"): Specifies the size or path of the machine learning model to be used for real-time transcription. Valid options include 'tiny', 'tiny.en', 'base', 'base.en', 'small', 'small.en', 'medium', 'medium.en', 'large-v1', 'large-v2'.
  87. - realtime_preview_resolution (float, default=0.1): Specifies the time interval in seconds after a chunk of audio gets transcribed. Lower values will result in more "real-time" (frequent) transcription updates but may increase computational load.
  88. - on_realtime_preview = A callable function triggered during real-time transcription. The function is invoked with the transcribed text as its argument.
  89. - silero_sensitivity (float, default=SILERO_SENSITIVITY): Sensitivity for the Silero Voice Activity Detection model ranging from 0 (least sensitive) to 1 (most sensitive). Default is 0.5.
  90. - webrtc_sensitivity (int, default=WEBRTC_SENSITIVITY): Sensitivity for the WebRTC Voice Activity Detection engine ranging from 1 (least sensitive) to 3 (most sensitive). Default is 3.
  91. - post_speech_silence_duration (float, default=0.2): Duration in seconds of silence that must follow speech before the recording is considered to be completed. This ensures that any brief pauses during speech don't prematurely end the recording.
  92. - min_gap_between_recordings (float, default=1.0): Specifies the minimum time interval in seconds that should exist between the end of one recording session and the beginning of another to prevent rapid consecutive recordings.
  93. - min_length_of_recording (float, default=1.0): Specifies the minimum duration in seconds that a recording session should last to ensure meaningful audio capture, preventing excessively short or fragmented recordings.
  94. - pre_recording_buffer_duration (float, default=0.2): Duration in seconds for the audio buffer to maintain pre-roll audio (compensates speech activity detection latency)
  95. - on_vad_detect_start (callable, default=None): Callback function to be called when the system listens for voice activity.
  96. - on_vad_detect_stop (callable, default=None): Callback function to be called when the system stops listening for voice activity.
  97. - wake_words (str, default=""): Comma-separated string of wake words to initiate recording. Supported wake words include:
  98. 'alexa', 'americano', 'blueberry', 'bumblebee', 'computer', 'grapefruits', 'grasshopper', 'hey google', 'hey siri', 'jarvis', 'ok google', 'picovoice', 'porcupine', 'terminator'.
  99. - wake_words_sensitivity (float, default=0.5): Sensitivity for wake word detection, ranging from 0 (least sensitive) to 1 (most sensitive). Default is 0.5.
  100. - wake_word_activation_delay (float, default=0): Duration in seconds after the start of monitoring before the system switches to wake word activation if no voice is initially detected. If set to zero, the system uses wake word activation immediately.
  101. - wake_word_timeout (float, default=5): Duration in seconds after a wake word is recognized. If no subsequent voice activity is detected within this window, the system transitions back to an inactive state, awaiting the next wake word or voice activation.
  102. - on_wakeword_detected (callable, default=None): Callback function to be called when a wake word is detected.
  103. - on_wakeword_timeout (callable, default=None): Callback function to be called when the system goes back to an inactive state after when no speech was detected after wake word activation
  104. - on_wakeword_detection_start (callable, default=None): Callback function to be called when the system starts to listen for wake words
  105. - on_wakeword_detection_end (callable, default=None): Callback function to be called when the system stops to listen for wake words (e.g. because of timeout or wake word detected)
  106. Raises:
  107. Exception: Errors related to initializing transcription model, wake word detection, or audio recording.
  108. """
        self.language = language
        self.wake_words = wake_words
        self.wake_word_activation_delay = wake_word_activation_delay
        self.wake_word_timeout = wake_word_timeout
        self.min_gap_between_recordings = min_gap_between_recordings
        self.min_length_of_recording = min_length_of_recording
        self.pre_recording_buffer_duration = pre_recording_buffer_duration
        self.post_speech_silence_duration = post_speech_silence_duration
        self.on_recording_start = on_recording_start
        self.on_recording_stop = on_recording_stop
        self.on_wakeword_detected = on_wakeword_detected
        self.on_wakeword_timeout = on_wakeword_timeout
        self.on_vad_detect_start = on_vad_detect_start
        self.on_vad_detect_stop = on_vad_detect_stop
        self.on_wakeword_detection_start = on_wakeword_detection_start
        self.on_wakeword_detection_end = on_wakeword_detection_end
        self.on_transcription_start = on_transcription_start
        self.realtime_preview = realtime_preview
        self.realtime_preview_model = realtime_preview_model
        self.realtime_preview_resolution = realtime_preview_resolution
        self.on_realtime_preview = on_realtime_preview
        self.realtime_preview_text = ""  # latest real-time transcription preview
        self.level = level
        self.buffer_size = BUFFER_SIZE
        self.sample_rate = SAMPLE_RATE
        self.recording_start_time = 0
        self.recording_stop_time = 0
        self.wake_word_detect_time = 0
        self.silero_check_time = 0
        self.speech_end_silence_start = 0
        self.silero_sensitivity = silero_sensitivity
        self.listen_start = 0
        self.spinner = spinner
        self.halo = None
        self.state = "inactive"
        self.wakeword_detected = False

        # Initialize the logging configuration with the specified level
        logging.basicConfig(format='RealTimeSTT: %(message)s', level=level)

        # Initialize the transcription model
        try:
            self.model = faster_whisper.WhisperModel(
                model_size_or_path=model,
                device='cuda' if torch.cuda.is_available() else 'cpu'
            )
            if self.realtime_preview:
                self.realtime_preview_model = faster_whisper.WhisperModel(
                    model_size_or_path=self.realtime_preview_model,
                    device='cuda' if torch.cuda.is_available() else 'cpu'
                )
        except Exception as e:
            logging.exception(f"Error initializing faster_whisper transcription model: {e}")
            raise

        # Setup wake word detection
        if wake_words:
            self.wake_words_list = [word.strip() for word in wake_words.lower().split(',')]
            sensitivity_list = [float(wake_words_sensitivity) for _ in range(len(self.wake_words_list))]
            try:
                self.porcupine = pvporcupine.create(keywords=self.wake_words_list, sensitivities=sensitivity_list)
                self.buffer_size = self.porcupine.frame_length
                self.sample_rate = self.porcupine.sample_rate
            except Exception as e:
                logging.exception(f"Error initializing porcupine wake word detection engine: {e}")
                raise

        # Setup audio recording infrastructure
        try:
            self.audio = pyaudio.PyAudio()
            self.stream = self.audio.open(
                rate=self.sample_rate,
                format=pyaudio.paInt16,
                channels=1,
                input=True,
                frames_per_buffer=self.buffer_size
            )
        except Exception as e:
            logging.exception(f"Error initializing pyaudio audio recording: {e}")
            raise

        # Setup voice activity detection model WebRTC
        try:
            self.webrtc_vad_model = webrtcvad.Vad()
            self.webrtc_vad_model.set_mode(webrtc_sensitivity)
        except Exception as e:
            logging.exception(f"Error initializing WebRTC voice activity detection engine: {e}")
            raise

        # Setup voice activity detection model Silero VAD
        try:
            self.silero_vad_model, _ = torch.hub.load(
                repo_or_dir="snakers4/silero-vad",
                model="silero_vad",
                verbose=False
            )
        except Exception as e:
            logging.exception(f"Error initializing Silero VAD voice activity detection engine: {e}")
            raise

        # Pre-roll buffer: holds the audio chunks captured just before recording starts
        self.audio_buffer = collections.deque(
            maxlen=int((self.sample_rate // self.buffer_size) * self.pre_recording_buffer_duration)
        )
        self.frames = []

        # Recording control flags
        self.is_recording = False
        self.is_running = True
        self.start_recording_on_voice_activity = False
        self.stop_recording_on_voice_deactivity = False

        # Start the recording worker thread
        self.recording_thread = threading.Thread(target=self._recording_worker)
        self.recording_thread.daemon = True
        self.recording_thread.start()

        # Start the realtime transcription worker thread
        self.realtime_thread = threading.Thread(target=self._realtime_worker)
        self.realtime_thread.daemon = True
        self.realtime_thread.start()

    def text(self):
        """
        Transcribes audio captured by the class instance using the `faster_whisper` model.
        - Waits for voice activity if not yet started recording
        - Waits for voice deactivity if not yet stopped recording
        - Transcribes the recorded audio.

        Returns:
            str: The transcription of the recorded audio or an empty string in case of an error.
        """
        self.listen_start = time.time()

        # If not yet started to record, wait for voice activity to initiate recording.
        if not self.is_recording and len(self.frames) == 0:
            self._set_state("listening")
            self.start_recording_on_voice_activity = True

            while not self.is_recording:
                time.sleep(TIME_SLEEP)

        # If still recording, wait for voice deactivity to finish recording.
        if self.is_recording:
            self.stop_recording_on_voice_deactivity = True

            while self.is_recording:
                time.sleep(TIME_SLEEP)

        # Convert the concatenated frames into text
        try:
            audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)
            audio_array = audio_array.astype(np.float32) / 32768.0
            self.frames = []

            # Perform transcription
            transcription = " ".join(
                seg.text for seg in self.model.transcribe(
                    audio_array,
                    language=self.language if self.language else None
                )[0]
            ).strip()

            self.recording_stop_time = 0
            self.listen_start = 0

            self._set_state("inactive")
            return transcription

        except ValueError:
            logging.error("Error converting audio buffer to numpy array.")
            raise

        except faster_whisper.WhisperError as e:
            logging.error(f"Whisper transcription error: {e}")
            raise

        except Exception as e:
            logging.error(f"General transcription error: {e}")
            raise
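
    # For continuous dictation, text() can simply be called in a loop
    # (sketch, assuming `recorder` is an AudioToTextRecorder instance):
    #
    #     while True:
    #         print(recorder.text())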

    def start(self):
        """
        Starts recording audio directly without waiting for voice activity.
        """
        # Ensure there's a minimum interval between stopping and starting recording
        if time.time() - self.recording_stop_time < self.min_gap_between_recordings:
            logging.info("Attempted to start recording too soon after stopping.")
            return self

        logging.info("recording started")

        self.wakeword_detected = False
        self.wake_word_detect_time = 0
        self.frames = []
        self.is_recording = True
        self.recording_start_time = time.time()
        self._set_state("recording")

        if self.on_recording_start:
            self.on_recording_start()

        return self

    def stop(self):
        """
        Stops recording audio.
        """
        # Ensure there's a minimum interval between starting and stopping recording
        if time.time() - self.recording_start_time < self.min_length_of_recording:
            logging.info("Attempted to stop recording too soon after starting.")
            return self

        logging.info("recording stopped")

        self.is_recording = False
        self.recording_stop_time = time.time()
        self._set_state("transcribing")

        if self.on_recording_stop:
            self.on_recording_stop()

        return self
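
    # start() and stop() allow push-to-talk style control that bypasses voice
    # activity detection (sketch; five seconds is an arbitrary choice):
    #
    #     recorder.start()
    #     time.sleep(5)
    #     recorder.stop()
    #     print(recorder.text())  # transcribes the frames captured above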

    def shutdown(self):
        """
        Safely shuts down the audio recording by stopping the recording worker and closing the audio stream.
        """
        self.is_recording = False
        self.is_running = False
        self.recording_thread.join()
        self.realtime_thread.join()

        try:
            self.stream.stop_stream()
            self.stream.close()
            self.audio.terminate()
        except Exception as e:
            logging.error(f"Error closing the audio stream: {e}")

    def _calculate_percentile_mean(self, buffer, percentile, upper=True):
        """
        Calculates the mean of the specified percentile from the provided buffer of
        noise levels. If upper is True, it calculates from the upper side,
        otherwise from the lower side.

        Args:
        - buffer (list): The buffer containing the history of noise levels.
        - percentile (float): The desired percentile (0.0 <= percentile <= 1.0). E.g., 0.125 for 1/8.
        - upper (bool): Determines if the function considers the upper or lower portion of data.

        Returns:
        - float: The mean value of the desired portion.
        """
        sorted_buffer = sorted(buffer)
        index = int(len(sorted_buffer) * percentile)

        # Guard against an empty slice: without this, sorted_buffer[-0:] below
        # would return the whole list rather than the top zero elements
        if index == 0:
            return 0.0

        if upper:
            values = sorted_buffer[-index:]  # Get values from the top
        else:
            values = sorted_buffer[:index]   # Get values from the bottom

        return sum(values) / len(values)
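
    # Worked example: for buffer = [1, 2, 3, 4, 5, 6, 7, 8] and percentile = 0.125,
    # index = int(8 * 0.125) = 1, so upper=True averages the top value ([8] -> 8.0)
    # and upper=False the bottom value ([1] -> 1.0).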

    def _is_silero_speech(self, data):
        """
        Returns true if speech is detected in the provided audio data

        Args:
            data (bytes): raw bytes of audio data (1024 raw bytes with 16000 sample rate and 16 bits per sample)
        """
        audio_chunk = np.frombuffer(data, dtype=np.int16)
        audio_chunk = audio_chunk.astype(np.float32) / 32768.0  # Convert to float and normalize
        vad_prob = self.silero_vad_model(torch.from_numpy(audio_chunk), SAMPLE_RATE).item()

        # A higher sensitivity lowers the speech probability threshold
        return vad_prob > (1 - self.silero_sensitivity)

    def _is_webrtc_speech(self, data):
        """
        Returns true if speech is detected in the provided audio data

        Args:
            data (bytes): raw bytes of audio data (1024 raw bytes with 16000 sample rate and 16 bits per sample)
        """
        # Number of samples in a 10 ms frame (the frame length WebRTC VAD expects)
        frame_length = int(self.sample_rate * 0.01)

        # Number of complete 10 ms frames contained in the data (2 bytes per sample)
        num_frames = int(len(data) / (2 * frame_length))

        for i in range(num_frames):
            start_byte = i * frame_length * 2
            end_byte = start_byte + frame_length * 2
            frame = data[start_byte:end_byte]
            if self.webrtc_vad_model.is_speech(frame, self.sample_rate):
                return True

        return False
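
    # With the default settings, each chunk read from the stream holds 512 samples
    # (1024 bytes) at 16 kHz, i.e. 32 ms of audio: _is_webrtc_speech therefore
    # checks int(1024 / 320) = 3 complete 10 ms frames (160 samples / 320 bytes
    # each) and ignores the ~2 ms remainder.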

    def _is_voice_active(self, data):
        """
        Determine if voice is active based on the provided data.

        Args:
            data: The audio data to be checked for voice activity.

        Returns:
            bool: True if voice is active, False otherwise.
        """
        # Define a constant for the time threshold
        TIME_THRESHOLD = 0.1

        # Check if enough time has passed to reset the Silero check time
        if time.time() - self.silero_check_time > TIME_THRESHOLD:
            self.silero_check_time = 0

        # First perform a quick check for voice activity using WebRTC
        if self._is_webrtc_speech(data):

            # If the Silero check time is not set, run the heavier Silero check;
            # the timestamp rate-limits it to at most once per TIME_THRESHOLD
            if self.silero_check_time == 0:
                self.silero_check_time = time.time()

                # Perform a more intensive check using Silero
                if self._is_silero_speech(data):
                    return True  # Voice is active

        return False  # Voice is not active

    def _set_state(self, new_state):
        """
        Update the current state of the recorder and execute corresponding state-change callbacks.

        Args:
            new_state (str): The new state to set.
        """
        # Check if the state has actually changed
        if new_state == self.state:
            return

        # Store the current state for later comparison
        old_state = self.state

        # Update to the new state
        self.state = new_state

        # Execute callbacks based on transitioning FROM a particular state
        if old_state == "listening":
            if self.on_vad_detect_stop:
                self.on_vad_detect_stop()
        elif old_state == "wakeword":
            if self.on_wakeword_detection_end:
                self.on_wakeword_detection_end()

        # Execute callbacks based on transitioning TO a particular state
        if new_state == "listening":
            if self.on_vad_detect_start:
                self.on_vad_detect_start()
            self._set_spinner("speak now")
            if self.spinner:
                self.halo._interval = 250
        elif new_state == "wakeword":
            if self.on_wakeword_detection_start:
                self.on_wakeword_detection_start()
            self._set_spinner(f"say {self.wake_words}")
            if self.spinner:
                self.halo._interval = 500
        elif new_state == "transcribing":
            if self.on_transcription_start:
                self.on_transcription_start()
            self._set_spinner("transcribing")
            if self.spinner:
                self.halo._interval = 50
        elif new_state == "recording":
            self._set_spinner("recording")
            if self.spinner:
                self.halo._interval = 100
        elif new_state == "inactive":
            if self.spinner and self.halo:
                self.halo.stop()
                self.halo = None
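
    # The recorder cycles through five states, each with its own spinner text
    # and refresh interval: "inactive", "listening" (waiting for voice activity),
    # "wakeword" (waiting for a wake word), "recording" and "transcribing".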

    def _set_spinner(self, text):
        """
        Update the spinner's text or create a new spinner with the provided text.

        Args:
            text (str): The text to be displayed alongside the spinner.
        """
        if self.spinner:
            # If the Halo spinner doesn't exist, create and start it
            if self.halo is None:
                self.halo = Halo(text=text)
                self.halo.start()
            # If the Halo spinner already exists, just update the text
            else:
                self.halo.text = text

    def _recording_worker(self):
        """
        The main worker method which constantly monitors the audio input for voice activity and accordingly starts/stops the recording.
        """
        was_recording = False
        delay_was_passed = False

        # Continuously monitor audio for voice activity
        while self.is_running:

            try:
                data = self.stream.read(self.buffer_size)

            except OSError as e:
                # pyaudio signals input overflow via an OSError whose errno is
                # pyaudio.paInputOverflowed (an error code, not an exception class)
                if e.errno == pyaudio.paInputOverflowed:
                    logging.warning("Input overflowed. Frame dropped.")
                else:
                    logging.error(f"Error during recording: {e}")
                    time.sleep(1)
                continue

            except Exception as e:
                logging.error(f"Error during recording: {e}")
                time.sleep(1)
                continue

            if not self.is_recording:
                # Handle the not-recording state

                time_since_listen_start = time.time() - self.listen_start if self.listen_start else 0
                wake_word_activation_delay_passed = (time_since_listen_start > self.wake_word_activation_delay)

                # Handle the wake-word timeout callback
                if wake_word_activation_delay_passed and not delay_was_passed:
                    if self.wake_words and self.wake_word_activation_delay:
                        if self.on_wakeword_timeout:
                            self.on_wakeword_timeout()
                delay_was_passed = wake_word_activation_delay_passed

                # Set state and spinner text
                if not self.recording_stop_time:
                    if self.wake_words and wake_word_activation_delay_passed and not self.wakeword_detected:
                        self._set_state("wakeword")
                    else:
                        if self.listen_start:
                            self._set_state("listening")
                        else:
                            self._set_state("inactive")

                # Detect wake words if applicable
                if self.wake_words and wake_word_activation_delay_passed:
                    try:
                        pcm = struct.unpack_from("h" * self.buffer_size, data)
                        wakeword_index = self.porcupine.process(pcm)

                    except struct.error:
                        logging.error("Error unpacking audio data for wake word processing.")
                        continue

                    except Exception as e:
                        logging.error(f"Wake word processing error: {e}")
                        continue
                    # If a wake word is detected
                    if wakeword_index >= 0:

                        # Remove the wake word from the recording by trimming the
                        # pre-roll buffer down to its last ~0.1 seconds of audio.
                        # The buffer stores chunks of `buffer_size` samples, so the
                        # cut-off index is expressed in chunks rather than samples.
                        chunks_for_0_1_sec = int(self.sample_rate * 0.1 / self.buffer_size)
                        start_index = max(0, len(self.audio_buffer) - chunks_for_0_1_sec)
                        temp_samples = collections.deque(itertools.islice(self.audio_buffer, start_index, None))
                        self.audio_buffer.clear()
                        self.audio_buffer.extend(temp_samples)

                        self.wake_word_detect_time = time.time()
                        self.wakeword_detected = True
                        if self.on_wakeword_detected:
                            self.on_wakeword_detected()
                # Check for voice activity to trigger the start of recording
                if ((not self.wake_words or not wake_word_activation_delay_passed) and self.start_recording_on_voice_activity) or self.wakeword_detected:

                    if self._is_voice_active(data):
                        logging.info("voice activity detected")

                        self.start()

                        if self.is_recording:
                            self.start_recording_on_voice_activity = False

                            # Add the buffered audio to the recording frames
                            self.frames.extend(list(self.audio_buffer))
                            self.audio_buffer.clear()

                        self.silero_vad_model.reset_states()

                self.speech_end_silence_start = 0

            else:
                # If we are currently recording

                # Stop the recording if silence is detected after speech
                if self.stop_recording_on_voice_deactivity:

                    if not self._is_webrtc_speech(data):
                        # Voice deactivity was detected, so we start measuring silence time before stopping recording
                        if self.speech_end_silence_start == 0:
                            self.speech_end_silence_start = time.time()
                    else:
                        self.speech_end_silence_start = 0

                    # Wait for silence to stop recording after speech
                    if self.speech_end_silence_start and time.time() - self.speech_end_silence_start > self.post_speech_silence_duration:
                        logging.info("voice deactivity detected")
                        self.stop()

            if not self.is_recording and was_recording:
                # Reset after stopping recording to ensure a clean state
                self.stop_recording_on_voice_deactivity = False

            if time.time() - self.silero_check_time > 0.1:
                self.silero_check_time = 0

            if self.wake_word_detect_time and time.time() - self.wake_word_detect_time > self.wake_word_timeout:
                self.wake_word_detect_time = 0
                if self.wakeword_detected and self.on_wakeword_timeout:
                    self.on_wakeword_timeout()
                self.wakeword_detected = False

            if self.is_recording:
                self.frames.append(data)
            else:
                self.audio_buffer.append(data)

            was_recording = self.is_recording
            time.sleep(TIME_SLEEP)

    def _realtime_worker(self):
        """
        Performs real-time transcription if the feature is enabled.

        The method is responsible for transcribing recorded audio frames in real-time
        based on the specified resolution interval.
        The transcribed text is stored in `self.realtime_preview_text` and a callback
        function is invoked with this text if specified.
        """
        # Return immediately if real-time transcription is not enabled
        if not self.realtime_preview:
            return

        # Continue running as long as the main process is active
        while self.is_running:

            # Check if the recording is active
            if self.is_recording:

                # Sleep for the duration of the transcription resolution
                time.sleep(self.realtime_preview_resolution)

                # Convert the buffer frames to a NumPy array
                audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)

                # Normalize the array to a [-1, 1] range
                audio_array = audio_array.astype(np.float32) / 32768.0

                # Perform transcription and assemble the text
                segments = self.realtime_preview_model.transcribe(
                    audio_array,
                    language=self.language if self.language else None
                )
                self.realtime_preview_text = " ".join(seg.text for seg in segments[0]).strip()

                # Invoke the callback with the transcribed text
                if self.is_recording:
                    if self.on_realtime_preview:
                        self.on_realtime_preview(self.realtime_preview_text)

            # If not recording, sleep briefly before checking again
            else:
                time.sleep(0.1)

    def __del__(self):
        """
        Destructor method ensures safe shutdown of the recorder when the instance is destroyed.
        """
        self.shutdown()
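

if __name__ == '__main__':
    # Demo sketch (illustrative, assuming a working default microphone):
    # records a single utterance and prints the transcription.
    recorder = AudioToTextRecorder(model="tiny", spinner=True)
    try:
        print("Say something...")
        print(f"Transcription: {recorder.text()}")
    finally:
        recorder.shutdown()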