audio_recorder.py

  1. """
  2. The AudioToTextRecorder class in the provided code facilitates fast speech-to-text transcription.
  3. The class employs the faster_whisper library to transcribe the recorded audio
  4. into text using machine learning models, which can be run either on a GPU or CPU.
  5. Voice activity detection (VAD) is built in, meaning the software can automatically
  6. start or stop recording based on the presence or absence of speech.
  7. It integrates wake word detection through the pvporcupine library, allowing the
  8. software to initiate recording when a specific word or phrase is spoken.
  9. The system provides real-time feedback and can be further customized.
  10. Features:
  11. - Voice Activity Detection: Automatically starts/stops recording when speech is detected or when speech ends.
  12. - Wake Word Detection: Starts recording when a specified wake word (or words) is detected.
  13. - Event Callbacks: Customizable callbacks for when recording starts or finishes.
  14. - Fast Transcription: Returns the transcribed text from the audio as fast as possible.
  15. Author: Kolja Beigel
  16. """
import pyaudio
import collections
import faster_whisper
import torch
import numpy as np
import struct
import pvporcupine
import threading
import time
import logging
import webrtcvad
import itertools
from halo import Halo

SAMPLE_RATE = 16000
BUFFER_SIZE = 512
SILERO_SENSITIVITY = 0.6
WEBRTC_SENSITIVITY = 3
WAKE_WORDS_SENSITIVITY = 0.6
TIME_SLEEP = 0.02


class AudioToTextRecorder:
    """
    A class responsible for capturing audio from the microphone, detecting voice
    activity, and then transcribing the captured audio using the `faster_whisper`
    model.
    """

    def __init__(self,
                 model: str = "tiny",
                 language: str = "",
                 on_recording_start=None,
                 on_recording_stop=None,
                 spinner=True,
                 level=logging.WARNING,

                 # Voice activation parameters
                 silero_sensitivity: float = SILERO_SENSITIVITY,
                 webrtc_sensitivity: int = WEBRTC_SENSITIVITY,
                 post_speech_silence_duration: float = 0.2,
                 min_length_of_recording: float = 1.0,
                 min_gap_between_recordings: float = 1.0,
                 pre_recording_buffer_duration: float = 1.0,

                 # Wake word parameters
                 wake_words: str = "",
                 wake_words_sensitivity: float = WAKE_WORDS_SENSITIVITY,
                 wake_word_activation_delay: float = 0,
                 wake_word_timeout: float = 5,
                 on_wakeword_detected=None,
                 on_wakeword_timeout=None,
                 ):
  63. """
  64. Initializes an audio recorder and transcription and wake word detection.
  65. Args:
  66. - model (str, default="tiny"): Specifies the size of the transcription model to use or the path to a converted model directory.
  67. Valid options are 'tiny', 'tiny.en', 'base', 'base.en', 'small', 'small.en', 'medium', 'medium.en', 'large-v1', 'large-v2'.
  68. If a specific size is provided, the model is downloaded from the Hugging Face Hub.
  69. - language (str, default=""): Language code for speech-to-text engine. If not specified, the model will attempt to detect the language automatically.
  70. - on_recording_start (callable, default=None): Callback function to be called when recording starts.
  71. - on_recording_stop (callable, default=None): Callback function to be called when recording stops.
  72. - spinner (bool, default=True): Show spinner animation with current state.
  73. - level (int, default=logging.WARNING): Logging level.
  74. - silero_sensitivity (float, default=SILERO_SENSITIVITY): Sensitivity for the Silero Voice Activity Detection model ranging from 0 (least sensitive) to 1 (most sensitive). Default is 0.5.
  75. - webrtc_sensitivity (int, default=WEBRTC_SENSITIVITY): Sensitivity for the WebRTC Voice Activity Detection engine ranging from 1 (least sensitive) to 3 (most sensitive). Default is 3.
  76. - post_speech_silence_duration (float, default=0.2): Duration in seconds of silence that must follow speech before the recording is considered to be completed. This ensures that any brief pauses during speech don't prematurely end the recording.
  77. - min_gap_between_recordings (float, default=1.0): Specifies the minimum time interval in seconds that should exist between the end of one recording session and the beginning of another to prevent rapid consecutive recordings.
  78. - min_length_of_recording (float, default=1.0): Specifies the minimum duration in seconds that a recording session should last to ensure meaningful audio capture, preventing excessively short or fragmented recordings.
  79. - pre_recording_buffer_duration (float, default=0.2): Duration in seconds for the audio buffer to maintain pre-roll audio (compensates speech activity detection latency)
  80. - wake_words (str, default=""): Comma-separated string of wake words to initiate recording. Supported wake words include:
  81. 'alexa', 'americano', 'blueberry', 'bumblebee', 'computer', 'grapefruits', 'grasshopper', 'hey google', 'hey siri', 'jarvis', 'ok google', 'picovoice', 'porcupine', 'terminator'.
  82. - wake_words_sensitivity (float, default=0.5): Sensitivity for wake word detection, ranging from 0 (least sensitive) to 1 (most sensitive). Default is 0.5.
  83. - wake_word_activation_delay (float, default=0): Duration in seconds after the start of monitoring before the system switches to wake word activation if no voice is initially detected. If set to zero, the system uses wake word activation immediately.
  84. - wake_word_timeout (float, default=5): Duration in seconds after a wake word is recognized. If no subsequent voice activity is detected within this window, the system transitions back to an inactive state, awaiting the next wake word or voice activation.
  85. - on_wakeword_detected (callable, default=None): Callback function to be called when a wake word is detected.
  86. - on_wakeword_timeout (callable, default=None): Callback function to be called when the system goes back to an inactive state after when no speech was detected after wake word activation
  87. Raises:
  88. Exception: Errors related to initializing transcription model, wake word detection, or audio recording.
  89. """
        self.language = language
        self.wake_words = wake_words
        self.wake_word_activation_delay = wake_word_activation_delay
        self.wake_word_timeout = wake_word_timeout
        self.min_gap_between_recordings = min_gap_between_recordings
        self.min_length_of_recording = min_length_of_recording
        self.pre_recording_buffer_duration = pre_recording_buffer_duration
        self.post_speech_silence_duration = post_speech_silence_duration
        self.on_recording_start = on_recording_start
        self.on_recording_stop = on_recording_stop
        self.on_wakeword_detected = on_wakeword_detected
        self.on_wakeword_timeout = on_wakeword_timeout
        self.level = level
        self.buffer_size = BUFFER_SIZE
        self.sample_rate = SAMPLE_RATE
        self.recording_start_time = 0
        self.recording_stop_time = 0
        self.wake_word_detect_time = 0
        self.silero_check_time = 0
        self.speech_end_silence_start = 0
        self.silero_sensitivity = silero_sensitivity
        self.listen_start = 0
        self.spinner = spinner
        self.halo = None
        self.wakeword_detected = False

        # Initialize the logging configuration with the specified level
        logging.basicConfig(format='RealTimeSTT: %(message)s', level=level)

        # Initialize the transcription model
        try:
            self.model = faster_whisper.WhisperModel(model_size_or_path=model, device='cuda' if torch.cuda.is_available() else 'cpu')
        except Exception as e:
            logging.exception(f"Error initializing faster_whisper transcription model: {e}")
            raise

        # Setup wake word detection
        if wake_words:
            self.wake_words_list = [word.strip() for word in wake_words.lower().split(',')]
            sensitivity_list = [float(wake_words_sensitivity) for _ in range(len(self.wake_words_list))]
            try:
                self.porcupine = pvporcupine.create(keywords=self.wake_words_list, sensitivities=sensitivity_list)
                self.buffer_size = self.porcupine.frame_length
                self.sample_rate = self.porcupine.sample_rate
            except Exception as e:
                logging.exception(f"Error initializing porcupine wake word detection engine: {e}")
                raise

        # Setup audio recording infrastructure
        try:
            self.audio = pyaudio.PyAudio()
            self.stream = self.audio.open(rate=self.sample_rate, format=pyaudio.paInt16, channels=1, input=True, frames_per_buffer=self.buffer_size)
        except Exception as e:
            logging.exception(f"Error initializing pyaudio audio recording: {e}")
            raise

        # Setup voice activity detection model WebRTC
        try:
            self.webrtc_vad_model = webrtcvad.Vad()
            self.webrtc_vad_model.set_mode(webrtc_sensitivity)
        except Exception as e:
            logging.exception(f"Error initializing WebRTC voice activity detection engine: {e}")
            raise

        # Setup voice activity detection model Silero VAD
        try:
            self.silero_vad_model, _ = torch.hub.load(
                repo_or_dir="snakers4/silero-vad",
                model="silero_vad",
                verbose=False
            )
        except Exception as e:
            logging.exception(f"Error initializing Silero VAD voice activity detection engine: {e}")
            raise

        # Pre-roll buffer, sized in chunks: at 16000 Hz with 512-sample chunks
        # this keeps about 31 chunks per second of buffered audio
        self.audio_buffer = collections.deque(maxlen=int((self.sample_rate // self.buffer_size) * self.pre_recording_buffer_duration))
        self.frames = []

        # Recording control flags
        self.is_recording = False
        self.is_running = True
        self.start_recording_on_voice_activity = False
        self.stop_recording_on_voice_deactivity = False

        # Start the recording worker thread
        self.recording_thread = threading.Thread(target=self._recording_worker)
        self.recording_thread.daemon = True
        self.recording_thread.start()

    def text(self):
        """
        Transcribes audio captured by the class instance using the `faster_whisper` model.

        - Waits for voice activity if recording has not yet started.
        - Waits for voice deactivity if recording has not yet stopped.
        - Transcribes the recorded audio.

        Returns:
        str: The transcription of the recorded audio.

        Raises:
        Exception: If audio conversion or transcription fails.
        """
        self.listen_start = time.time()

        # If not yet started to record, wait for voice activity to initiate recording.
        if not self.is_recording and len(self.frames) == 0:
            self.set_spinner("speak now")
            self.start_recording_on_voice_activity = True
            while not self.is_recording:
                time.sleep(TIME_SLEEP)

        # If still recording, wait for voice deactivity to finish recording.
        if self.is_recording:
            self.stop_recording_on_voice_deactivity = True
            while self.is_recording:
                time.sleep(TIME_SLEEP)

        # Convert the concatenated frames into text
        try:
            # Reinterpret the raw 16-bit samples as floats normalized to [-1.0, 1.0)
            audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)
            audio_array = audio_array.astype(np.float32) / 32768.0
            self.frames = []

            transcription = " ".join(seg.text for seg in self.model.transcribe(audio_array, language=self.language if self.language else None)[0]).strip()

            if self.spinner and self.halo:
                self.halo.stop()
                self.halo = None
            self.recording_stop_time = 0

            return transcription
        except ValueError:
            logging.error("Error converting audio buffer to numpy array.")
            raise
        except Exception as e:
            logging.error(f"General transcription error: {e}")
            raise

    def set_spinner(self, text):
        """
        Shows the spinner (starting it if necessary) with the given status text.
        """
        if self.spinner:
            if not self.halo:
                self.halo = Halo(text=text)
                self.halo.start()
            else:
                self.halo.text = text

    def start(self):
        """
        Starts recording audio directly without waiting for voice activity.
        """
        # Ensure there's a minimum interval between stopping and starting recording
        if time.time() - self.recording_stop_time < self.min_gap_between_recordings:
            logging.info("Attempted to start recording too soon after stopping.")
            return self

        logging.info("recording started")
        self.wakeword_detected = False
        self.wake_word_detect_time = 0
        self.frames = []
        self.is_recording = True
        self.recording_start_time = time.time()

        self.set_spinner("recording")
        if self.halo:
            self.halo._interval = 100

        if self.on_recording_start:
            self.on_recording_start()

        return self

    def stop(self):
        """
        Stops recording audio.
        """
        # Ensure there's a minimum interval between starting and stopping recording
        if time.time() - self.recording_start_time < self.min_length_of_recording:
            logging.info("Attempted to stop recording too soon after starting.")
            return self

        logging.info("recording stopped")
        self.is_recording = False
        self.recording_stop_time = time.time()

        self.set_spinner("transcribing")

        if self.on_recording_stop:
            self.on_recording_stop()

        return self

    def shutdown(self):
        """
        Safely shuts down the audio recording by stopping the recording worker and closing the audio stream.
        """
        self.is_recording = False
        self.is_running = False
        self.recording_thread.join()

        try:
            self.stream.stop_stream()
            self.stream.close()
            self.audio.terminate()
        except Exception as e:
            logging.error(f"Error closing the audio stream: {e}")

    def _calculate_percentile_mean(self, buffer, percentile, upper=True):
        """
        Calculates the mean of the specified percentile from the provided buffer of
        noise levels. If upper is True, it calculates from the upper side,
        otherwise from the lower side.

        Args:
        - buffer (list): The buffer containing the history of noise levels.
        - percentile (float): The desired percentile (0.0 <= percentile <= 1.0). E.g., 0.125 for 1/8.
        - upper (bool): Determines if the function considers the upper or lower portion of data.

        Returns:
        - float: The mean value of the desired portion.
        """
        sorted_buffer = sorted(buffer)
        index = int(len(sorted_buffer) * percentile)
        if index == 0:
            # Guard against an empty slice (sorted_buffer[-0:] would return everything)
            return 0.0

        if upper:
            values = sorted_buffer[-index:]  # Take the highest `index` values
        else:
            values = sorted_buffer[:index]  # Take the lowest `index` values

        return sum(values) / len(values)

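    # Worked example for _calculate_percentile_mean above (hypothetical buffer):
    # with buffer=[1, 2, 3, 4, 5, 6, 7, 8] and percentile=0.25, index is 2;
    # upper=True averages the top two values [7, 8] -> 7.5, while upper=False
    # averages the bottom two values [1, 2] -> 1.5.
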
    def is_silero_speech(self, data):
        """
        Returns true if speech is detected in the provided audio data.

        Args:
        data (bytes): raw audio data (512 16-bit samples = 1024 bytes at a 16000 Hz sample rate)
        """
        audio_chunk = np.frombuffer(data, dtype=np.int16)
        audio_chunk = audio_chunk.astype(np.float32) / 32768.0  # Convert to float and normalize
        vad_prob = self.silero_vad_model(torch.from_numpy(audio_chunk), SAMPLE_RATE).item()
        return vad_prob > self.silero_sensitivity

    def is_webrtc_speech(self, data):
        """
        Returns true if speech is detected in the provided audio data.

        Args:
        data (bytes): raw audio data (512 16-bit samples = 1024 bytes at a 16000 Hz sample rate)
        """
        # Number of samples in a 10 ms frame (WebRTC VAD accepts frames of 10, 20, or 30 ms)
        frame_length = int(self.sample_rate * 0.01)
        num_frames = int(len(data) / (2 * frame_length))

        # Report speech as soon as any single 10 ms frame is classified as speech
        for i in range(num_frames):
            start_byte = i * frame_length * 2
            end_byte = start_byte + frame_length * 2
            frame = data[start_byte:end_byte]
            if self.webrtc_vad_model.is_speech(frame, self.sample_rate):
                return True

        return False

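    # For reference, the framing arithmetic in is_webrtc_speech above: at 16000 Hz
    # a 10 ms frame is 160 samples = 320 bytes, so a 512-sample (1024-byte) chunk
    # yields 3 full frames; the 32-sample remainder is not checked.
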
    def is_voice_active(self, data):
        """
        Returns true if both VAD engines detect speech, throttling the heavier
        Silero check to at most one run per 100 ms.
        """
        # Release the Silero throttle once 100 ms have passed since the last check
        if time.time() - self.silero_check_time > 0.1:
            self.silero_check_time = 0

        # Run the fast WebRTC check first; confirm with Silero only when the throttle allows
        if self.is_webrtc_speech(data):
            if not self.silero_check_time:
                self.silero_check_time = time.time()
                if self.is_silero_speech(data):
                    return True

        return False

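    # The worker below implements a small state machine: while idle it either
    # listens for a wake word (if configured and the activation delay has passed)
    # or for direct voice activity; once recording, it appends each chunk to
    # self.frames and watches for post-speech silence to stop again.
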
    def _recording_worker(self):
        """
        The main worker method which constantly monitors the audio input for voice activity and accordingly starts/stops the recording.
        """
        was_recording = False

        # Continuously monitor audio for voice activity
        while self.is_running:
            try:
                data = self.stream.read(self.buffer_size)
            except OSError as e:
                # PyAudio raises an OSError/IOError when the input buffer overflows
                logging.warning(f"Input overflowed. Frame dropped. ({e})")
                continue
            except Exception as e:
                logging.error(f"Error during recording: {e}")
                time.sleep(1)
                continue

            # Check if we're not currently recording
            if not self.is_recording:
                wake_word_activation_delay_passed = (time.time() - self.listen_start > self.wake_word_activation_delay)

                if self.spinner and self.halo and not self.recording_stop_time:
                    if self.wake_words and wake_word_activation_delay_passed and not self.wakeword_detected:
                        self.halo.text = f"say {self.wake_words}"
                        self.halo._interval = 500
                    else:
                        self.halo.text = "speak now"
                        self.halo._interval = 200

                if self.wake_words and wake_word_activation_delay_passed:
                    try:
                        pcm = struct.unpack_from("h" * self.buffer_size, data)
                        wakeword_index = self.porcupine.process(pcm)
                    except struct.error:
                        logging.error("Error unpacking audio data for wake word processing.")
                        continue
                    except Exception as e:
                        logging.error(f"Wake word processing error: {e}")
                        continue

                    if wakeword_index >= 0:
                        # Prevent the wake word itself from being included in the
                        # recording: the buffer holds chunks of `buffer_size` samples,
                        # so keep only the chunks covering roughly the last 0.1 seconds
                        chunks_for_0_1_sec = max(1, int(self.sample_rate * 0.1 / self.buffer_size))
                        start_index = max(0, len(self.audio_buffer) - chunks_for_0_1_sec)
                        temp_samples = collections.deque(itertools.islice(self.audio_buffer, start_index, None))
                        self.audio_buffer.clear()
                        self.audio_buffer.extend(temp_samples)

                        self.wake_word_detect_time = time.time()
                        self.wakeword_detected = True
                        if self.on_wakeword_detected:
                            self.on_wakeword_detected()

                # Check for voice activity to trigger the start of recording
                if ((not self.wake_words or not wake_word_activation_delay_passed) and self.start_recording_on_voice_activity) or self.wakeword_detected:
                    if self.is_voice_active(data):
                        logging.info("voice activity detected")
                        self.start()
                        if self.is_recording:
                            self.start_recording_on_voice_activity = False
                            # Add the buffered audio to the recording frames
                            self.frames.extend(list(self.audio_buffer))
                        self.silero_vad_model.reset_states()

                self.speech_end_silence_start = 0

            # If we're currently recording and voice deactivity is detected, stop the recording
            else:
                if self.stop_recording_on_voice_deactivity:
                    if not self.is_webrtc_speech(data):
                        # Silence detected (after voice was detected while recording)
                        if self.speech_end_silence_start == 0:
                            self.speech_end_silence_start = time.time()
                    else:
                        self.speech_end_silence_start = 0

                    if self.speech_end_silence_start and time.time() - self.speech_end_silence_start > self.post_speech_silence_duration:
                        logging.info("voice deactivity detected")
                        self.stop()

            if not self.is_recording and was_recording:
                # Reset after stopping recording to ensure a clean state
                self.stop_recording_on_voice_deactivity = False

            if time.time() - self.silero_check_time > 0.1:
                self.silero_check_time = 0

            if self.wake_word_detect_time and time.time() - self.wake_word_detect_time > self.wake_word_timeout:
                self.wake_word_detect_time = 0
                if self.wakeword_detected and self.on_wakeword_timeout:
                    self.on_wakeword_timeout()
                self.wakeword_detected = False

            if self.is_recording:
                self.frames.append(data)

            self.audio_buffer.append(data)

            was_recording = self.is_recording
            time.sleep(TIME_SLEEP)

    def __del__(self):
        """
        Destructor method ensures safe shutdown of the recorder when the instance is destroyed.
        """
        self.shutdown()
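

if __name__ == "__main__":
    # Minimal usage sketch (illustrative values, not part of the class itself):
    # capture one utterance per loop iteration and print its transcription.
    recorder = AudioToTextRecorder(model="tiny", language="en")
    try:
        while True:
            print(recorder.text())
    except KeyboardInterrupt:
        recorder.shutdown()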