  1. """
  2. The AudioToTextRecorder class in the provided code facilitates fast speech-to-text transcription.
  3. The class employs the faster_whisper library to transcribe the recorded audio
  4. into text using machine learning models, which can be run either on a GPU or CPU.
  5. Voice activity detection (VAD) is built in, meaning the software can automatically
  6. start or stop recording based on the presence or absence of speech.
  7. It integrates wake word detection through the pvporcupine library, allowing the
  8. software to initiate recording when a specific word or phrase is spoken.
  9. The system provides real-time feedback and can be further customized.
  10. Features:
  11. - Voice Activity Detection: Automatically starts/stops recording when speech is detected or when speech ends.
  12. - Wake Word Detection: Starts recording when a specified wake word (or words) is detected.
  13. - Event Callbacks: Customizable callbacks for when recording starts or finishes.
  14. - Fast Transcription: Returns the transcribed text from the audio as fast as possible.
  15. Author: Kolja Beigel
  16. """
import pyaudio
import collections
import faster_whisper
import torch
import numpy as np
import struct
import pvporcupine
import threading
import time
import logging
import webrtcvad
import itertools
import os
import re
import halo
import traceback

INIT_MODEL_TRANSCRIPTION = "tiny"
INIT_MODEL_TRANSCRIPTION_REALTIME = "tiny"
INIT_REALTIME_PROCESSING_PAUSE = 0.2
INIT_SILERO_SENSITIVITY = 0.4
INIT_WEBRTC_SENSITIVITY = 3
INIT_POST_SPEECH_SILENCE_DURATION = 0.6
INIT_MIN_LENGTH_OF_RECORDING = 0.5
INIT_MIN_GAP_BETWEEN_RECORDINGS = 0
INIT_WAKE_WORDS_SENSITIVITY = 0.6
INIT_PRE_RECORDING_BUFFER_DURATION = 1.0
INIT_WAKE_WORD_ACTIVATION_DELAY = 0.0
INIT_WAKE_WORD_TIMEOUT = 5.0

TIME_SLEEP = 0.02
SAMPLE_RATE = 16000
BUFFER_SIZE = 512
INT16_MAX_ABS_VALUE = 32768.0
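# With SAMPLE_RATE = 16000 and BUFFER_SIZE = 512, one audio chunk covers
# 512 / 16000 = 0.032 s (32 ms). The pre-recording deque created in the
# constructor is therefore sized as (sample_rate // buffer_size) *
# pre_recording_buffer_duration chunks, e.g. (16000 // 512) * 1.0 = 31 chunks,
# which is roughly 1 second of pre-roll audio.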
class AudioToTextRecorder:
    """
    A class responsible for capturing audio from the microphone, detecting
    voice activity, and then transcribing the captured audio using the
    `faster_whisper` model.
    """

    def __init__(self,
                 model: str = INIT_MODEL_TRANSCRIPTION,
                 language: str = "",
                 on_recording_start=None,
                 on_recording_stop=None,
                 on_transcription_start=None,
                 ensure_sentence_starting_uppercase=True,
                 ensure_sentence_ends_with_period=True,
                 spinner=True,
                 level=logging.WARNING,

                 # Realtime transcription parameters
                 enable_realtime_transcription=False,
                 realtime_model_type=INIT_MODEL_TRANSCRIPTION_REALTIME,
                 realtime_processing_pause=INIT_REALTIME_PROCESSING_PAUSE,
                 on_realtime_transcription_update=None,
                 on_realtime_transcription_stabilized=None,

                 # Voice activation parameters
                 silero_sensitivity: float = INIT_SILERO_SENSITIVITY,
                 webrtc_sensitivity: int = INIT_WEBRTC_SENSITIVITY,
                 post_speech_silence_duration: float = INIT_POST_SPEECH_SILENCE_DURATION,
                 min_length_of_recording: float = INIT_MIN_LENGTH_OF_RECORDING,
                 min_gap_between_recordings: float = INIT_MIN_GAP_BETWEEN_RECORDINGS,
                 pre_recording_buffer_duration: float = INIT_PRE_RECORDING_BUFFER_DURATION,
                 on_vad_detect_start=None,
                 on_vad_detect_stop=None,

                 # Wake word parameters
                 wake_words: str = "",
                 wake_words_sensitivity: float = INIT_WAKE_WORDS_SENSITIVITY,
                 wake_word_activation_delay: float = INIT_WAKE_WORD_ACTIVATION_DELAY,
                 wake_word_timeout: float = INIT_WAKE_WORD_TIMEOUT,
                 on_wakeword_detected=None,
                 on_wakeword_timeout=None,
                 on_wakeword_detection_start=None,
                 on_wakeword_detection_end=None,
                 ):
        """
        Initializes an audio recorder with transcription and wake word detection.

        Args:
        - model (str, default="tiny"): Specifies the size of the transcription model to use or the path to a converted model directory.
          Valid options are 'tiny', 'tiny.en', 'base', 'base.en', 'small', 'small.en', 'medium', 'medium.en', 'large-v1', 'large-v2'.
          If a specific size is provided, the model is downloaded from the Hugging Face Hub.
        - language (str, default=""): Language code for the speech-to-text engine. If not specified, the model attempts to detect the language automatically.
        - on_recording_start (callable, default=None): Callback invoked when recording of audio to be transcribed starts.
        - on_recording_stop (callable, default=None): Callback invoked when recording of audio to be transcribed stops.
        - on_transcription_start (callable, default=None): Callback invoked when transcription of audio to text starts.
        - ensure_sentence_starting_uppercase (bool, default=True): Ensures that every sentence detected by the algorithm starts with an uppercase letter.
        - ensure_sentence_ends_with_period (bool, default=True): Ensures that every sentence that doesn't end with punctuation such as "?" or "!" ends with a period.
        - spinner (bool, default=True): Show a spinner animation with the current state.
        - level (int, default=logging.WARNING): Logging level.
        - enable_realtime_transcription (bool, default=False): Enables or disables real-time transcription of audio. When set to True, the audio is transcribed continuously as it is being recorded.
        - realtime_model_type (str, default="tiny"): Specifies the machine learning model to be used for real-time transcription. Valid options include 'tiny', 'tiny.en', 'base', 'base.en', 'small', 'small.en', 'medium', 'medium.en', 'large-v1', 'large-v2'.
        - realtime_processing_pause (float, default=0.2): Time interval in seconds after a chunk of audio gets transcribed. Lower values result in more "real-time" (frequent) transcription updates but may increase computational load.
        - on_realtime_transcription_update (callable, default=None): Callback triggered whenever there's an update in the real-time transcription. Called with the newly transcribed text as its argument.
        - on_realtime_transcription_stabilized (callable, default=None): Callback triggered when the transcribed text stabilizes in quality. The stabilized text is generally more accurate but may arrive with a slight delay compared to the regular real-time updates.
        - silero_sensitivity (float, default=0.4): Sensitivity for the Silero Voice Activity Detection model, ranging from 0 (least sensitive) to 1 (most sensitive).
        - webrtc_sensitivity (int, default=3): Aggressiveness mode for the WebRTC Voice Activity Detection engine, ranging from 0 (least aggressive / most sensitive) to 3 (most aggressive / least sensitive).
        - post_speech_silence_duration (float, default=0.6): Duration in seconds of silence that must follow speech before the recording is considered complete. This ensures that brief pauses during speech don't prematurely end the recording.
        - min_length_of_recording (float, default=0.5): Minimum duration in seconds that a recording session should last, to ensure meaningful audio capture and prevent excessively short or fragmented recordings.
        - min_gap_between_recordings (float, default=0): Minimum time interval in seconds between the end of one recording session and the beginning of another, to prevent rapid consecutive recordings.
        - pre_recording_buffer_duration (float, default=1.0): Duration in seconds of pre-roll audio kept in a buffer (compensates for the latency of speech activity detection).
        - on_vad_detect_start (callable, default=None): Callback invoked when the system starts listening for voice activity.
        - on_vad_detect_stop (callable, default=None): Callback invoked when the system stops listening for voice activity.
        - wake_words (str, default=""): Comma-separated string of wake words to initiate recording. Supported wake words include:
          'alexa', 'americano', 'blueberry', 'bumblebee', 'computer', 'grapefruit', 'grasshopper', 'hey google', 'hey siri', 'jarvis', 'ok google', 'picovoice', 'porcupine', 'terminator'.
        - wake_words_sensitivity (float, default=0.6): Sensitivity for wake word detection, ranging from 0 (least sensitive) to 1 (most sensitive).
        - wake_word_activation_delay (float, default=0.0): Duration in seconds after the start of monitoring before the system switches to wake word activation if no voice is initially detected. If set to zero, the system uses wake word activation immediately.
        - wake_word_timeout (float, default=5.0): Duration in seconds after a wake word is recognized. If no subsequent voice activity is detected within this window, the system transitions back to an inactive state, awaiting the next wake word or voice activation.
        - on_wakeword_detected (callable, default=None): Callback invoked when a wake word is detected.
        - on_wakeword_timeout (callable, default=None): Callback invoked when the system goes back to an inactive state because no speech was detected after wake word activation.
        - on_wakeword_detection_start (callable, default=None): Callback invoked when the system starts listening for wake words.
        - on_wakeword_detection_end (callable, default=None): Callback invoked when the system stops listening for wake words (e.g. because of a timeout or a detected wake word).

        Raises:
            Exception: Errors related to initializing the transcription model, wake word detection, or audio recording.
        """
        self.language = language
        self.wake_words = wake_words
        self.wake_word_activation_delay = wake_word_activation_delay
        self.wake_word_timeout = wake_word_timeout
        self.ensure_sentence_starting_uppercase = ensure_sentence_starting_uppercase
        self.ensure_sentence_ends_with_period = ensure_sentence_ends_with_period
        self.min_gap_between_recordings = min_gap_between_recordings
        self.min_length_of_recording = min_length_of_recording
        self.pre_recording_buffer_duration = pre_recording_buffer_duration
        self.post_speech_silence_duration = post_speech_silence_duration
        self.on_recording_start = on_recording_start
        self.on_recording_stop = on_recording_stop
        self.on_wakeword_detected = on_wakeword_detected
        self.on_wakeword_timeout = on_wakeword_timeout
        self.on_vad_detect_start = on_vad_detect_start
        self.on_vad_detect_stop = on_vad_detect_stop
        self.on_wakeword_detection_start = on_wakeword_detection_start
        self.on_wakeword_detection_end = on_wakeword_detection_end
        self.on_transcription_start = on_transcription_start
        self.enable_realtime_transcription = enable_realtime_transcription
        self.realtime_model_type = realtime_model_type
        self.realtime_processing_pause = realtime_processing_pause
        self.on_realtime_transcription_update = on_realtime_transcription_update
        self.on_realtime_transcription_stabilized = on_realtime_transcription_stabilized

        self.level = level
        self.buffer_size = BUFFER_SIZE
        self.sample_rate = SAMPLE_RATE
        self.recording_start_time = 0
        self.recording_stop_time = 0
        self.wake_word_detect_time = 0
        self.silero_check_time = 0
        self.silero_working = False
        self.speech_end_silence_start = 0
        self.silero_sensitivity = silero_sensitivity
        self.listen_start = 0
        self.spinner = spinner
        self.halo = None
        self.state = "inactive"
        self.wakeword_detected = False
        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.is_webrtc_speech_active = False
        self.is_silero_speech_active = False

        # Initialize the logging configuration with the specified level
        logging.basicConfig(format='RealTimeSTT: %(name)s - %(levelname)s - %(message)s', level=level)  # filename='audio_recorder.log'

        # Initialize the transcription model
        try:
            self.model = faster_whisper.WhisperModel(model_size_or_path=model, device='cuda' if torch.cuda.is_available() else 'cpu')
            if self.enable_realtime_transcription:
                # Note: the attribute is re-assigned here, from the model name (str) to the loaded model instance
                self.realtime_model_type = faster_whisper.WhisperModel(model_size_or_path=self.realtime_model_type, device='cuda' if torch.cuda.is_available() else 'cpu')

        except Exception as e:
            logging.exception(f"Error initializing faster_whisper transcription model: {e}")
            raise

        # Setup wake word detection
        if wake_words:
            self.wake_words_list = [word.strip() for word in wake_words.lower().split(',')]
            sensitivity_list = [float(wake_words_sensitivity) for _ in range(len(self.wake_words_list))]

            try:
                self.porcupine = pvporcupine.create(keywords=self.wake_words_list, sensitivities=sensitivity_list)
                self.buffer_size = self.porcupine.frame_length
                self.sample_rate = self.porcupine.sample_rate

            except Exception as e:
                logging.exception(f"Error initializing porcupine wake word detection engine: {e}")
                raise

        # Setup audio recording infrastructure
        try:
            self.audio = pyaudio.PyAudio()
            self.stream = self.audio.open(rate=self.sample_rate, format=pyaudio.paInt16, channels=1, input=True, frames_per_buffer=self.buffer_size)

        except Exception as e:
            logging.exception(f"Error initializing pyaudio audio recording: {e}")
            raise

        # Setup voice activity detection model WebRTC
        try:
            logging.info(f"Initializing WebRTC voice with Sensitivity {webrtc_sensitivity}")
            self.webrtc_vad_model = webrtcvad.Vad()
            self.webrtc_vad_model.set_mode(webrtc_sensitivity)

        except Exception as e:
            logging.exception(f"Error initializing WebRTC voice activity detection engine: {e}")
            raise

        # Setup voice activity detection model Silero VAD
        try:
            self.silero_vad_model, _ = torch.hub.load(
                repo_or_dir="snakers4/silero-vad",
                model="silero_vad",
                verbose=False
            )

        except Exception as e:
            logging.exception(f"Error initializing Silero VAD voice activity detection engine: {e}")
            raise

        self.audio_buffer = collections.deque(maxlen=int((self.sample_rate // self.buffer_size) * self.pre_recording_buffer_duration))
        self.frames = []

        # Recording control flags
        self.is_recording = False
        self.is_running = True
        self.start_recording_on_voice_activity = False
        self.stop_recording_on_voice_deactivity = False

        # Start the recording worker thread
        self.recording_thread = threading.Thread(target=self._recording_worker)
        self.recording_thread.daemon = True
        self.recording_thread.start()

        # Start the realtime transcription worker thread
        self.realtime_thread = threading.Thread(target=self._realtime_worker)
        self.realtime_thread.daemon = True
        self.realtime_thread.start()

        logging.debug('Constructor finished')
    def text(self):
        """
        Transcribes audio captured by this class instance using the `faster_whisper` model.

        - Waits for voice activity if recording has not yet started.
        - Waits for voice deactivity if recording has not yet stopped.
        - Transcribes the recorded audio.

        Returns:
            str: The transcription of the recorded audio.
        """
        self.listen_start = time.time()

        # If not yet started to record, wait for voice activity to initiate recording.
        if not self.is_recording and len(self.frames) == 0:
            self._set_state("listening")
            self.start_recording_on_voice_activity = True

            while not self.is_recording:
                time.sleep(TIME_SLEEP)

        # If still recording, wait for voice deactivity to finish recording.
        if self.is_recording:
            self.stop_recording_on_voice_deactivity = True

            while self.is_recording:
                time.sleep(TIME_SLEEP)

        # Convert the concatenated frames into text
        try:
            audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)
            audio_array = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE
            self.frames = []

            # Perform transcription
            transcription = " ".join(seg.text for seg in self.model.transcribe(audio_array, language=self.language if self.language else None)[0]).strip()

            self.recording_stop_time = 0
            self.listen_start = 0

            self._set_state("inactive")

            return self._preprocess_output(transcription)

        except ValueError:
            logging.error("Error converting audio buffer to numpy array.")
            raise

        except faster_whisper.WhisperError as e:
            logging.error(f"Whisper transcription error: {e}")
            raise

        except Exception as e:
            logging.error(f"General transcription error: {e}")
            raise
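    # Example (illustrative sketch, given a constructed `recorder` instance):
    # `text()` blocks until a full utterance was captured and transcribed,
    # so it is typically called in a loop:
    #
    #     while True:
    #         print(recorder.text())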
    def start(self):
        """
        Starts recording audio directly without waiting for voice activity.
        """
        # Ensure there's a minimum interval between stopping and starting recording
        if time.time() - self.recording_stop_time < self.min_gap_between_recordings:
            logging.info("Attempted to start recording too soon after stopping.")
            return self

        logging.info("recording started")

        self.text_storage = []
        self.realtime_stabilized_text = ""
        self.realtime_stabilized_safetext = ""
        self.wakeword_detected = False
        self.wake_word_detect_time = 0
        self.frames = []
        self.is_recording = True
        self.recording_start_time = time.time()
        self._set_state("recording")
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False

        if self.on_recording_start:
            self.on_recording_start()

        return self

    def stop(self):
        """
        Stops recording audio.
        """
        # Ensure there's a minimum interval between starting and stopping recording
        if time.time() - self.recording_start_time < self.min_length_of_recording:
            logging.info("Attempted to stop recording too soon after starting.")
            return self

        logging.info("recording stopped")

        self.is_recording = False
        self.recording_stop_time = time.time()
        self.is_silero_speech_active = False
        self.is_webrtc_speech_active = False
        self.silero_check_time = 0
        self._set_state("transcribing")

        if self.on_recording_stop:
            self.on_recording_stop()

        return self
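    # Example (illustrative sketch): manual control instead of VAD-triggered
    # recording; both methods return `self`, so calls can be chained:
    #
    #     recorder.start()
    #     time.sleep(5)                    # capture roughly 5 seconds of audio
    #     print(recorder.stop().text())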
    def shutdown(self):
        """
        Safely shuts down the audio recording by stopping the recording worker and closing the audio stream.
        """
        self.is_recording = False
        self.is_running = False
        self.recording_thread.join()

        try:
            self.stream.stop_stream()
            self.stream.close()
            self.audio.terminate()

        except Exception as e:
            logging.error(f"Error closing the audio stream: {e}")
    def _is_silero_speech(self, data):
        """
        Returns true if speech is detected in the provided audio data.

        Args:
            data (bytes): raw bytes of audio data (1024 raw bytes with 16000 sample rate and 16 bits per sample)
        """
        logging.debug('Performing silero speech activity check')
        self.silero_working = True

        audio_chunk = np.frombuffer(data, dtype=np.int16)
        audio_chunk = audio_chunk.astype(np.float32) / INT16_MAX_ABS_VALUE  # Convert to float and normalize

        vad_prob = self.silero_vad_model(torch.from_numpy(audio_chunk), SAMPLE_RATE).item()
        is_silero_speech_active = vad_prob > (1 - self.silero_sensitivity)
        if is_silero_speech_active:
            self.is_silero_speech_active = True
        self.silero_working = False
        return is_silero_speech_active
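    # Note: the Silero sensitivity maps to the probability threshold as
    # `vad_prob > (1 - sensitivity)`, e.g. the default sensitivity of 0.4
    # accepts a chunk as speech once the model reports > 0.6 probability.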
    def _is_webrtc_speech(self, data, all_frames_must_be_true=False):
        """
        Returns true if speech is detected in the provided audio data.

        Args:
            data (bytes): raw bytes of audio data (1024 raw bytes with 16000 sample rate and 16 bits per sample)
        """
        # Number of audio samples per 10 ms frame
        frame_length = int(self.sample_rate * 0.01)
        num_frames = int(len(data) / (2 * frame_length))

        speech_frames = 0
        for i in range(num_frames):
            start_byte = i * frame_length * 2
            end_byte = start_byte + frame_length * 2
            frame = data[start_byte:end_byte]
            if self.webrtc_vad_model.is_speech(frame, self.sample_rate):
                speech_frames += 1
                if not all_frames_must_be_true:
                    return True
        if all_frames_must_be_true:
            return speech_frames == num_frames
        else:
            return False
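    # Note: WebRTC VAD only accepts 10, 20 or 30 ms frames. At 16 kHz a 10 ms
    # frame is 160 samples (320 bytes), so a 512-sample chunk (1024 bytes)
    # yields three full frames; any remaining bytes are ignored.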
    def _check_voice_activity(self, data):
        """
        Initiate a check whether voice is active based on the provided data.

        Args:
            data: The audio data to be checked for voice activity.
        """
        # First quick-performing check for voice activity using WebRTC
        self.is_webrtc_speech_active = self._is_webrtc_speech(data)

        if self.is_webrtc_speech_active:
            if not self.silero_working:
                self.silero_working = True

                # Run the intensive check in a separate thread
                threading.Thread(target=self._is_silero_speech, args=(data,)).start()
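    # Design note: WebRTC VAD is cheap and runs synchronously on every chunk
    # as a first-pass filter; the heavier Silero model is only consulted (in a
    # background thread) once WebRTC already reports speech. Recording starts
    # only when both detectors agree (see `_is_voice_active` below).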
    def _is_voice_active(self):
        """
        Determine if voice is active.

        Returns:
            bool: True if voice is active, False otherwise.
        """
        return self.is_webrtc_speech_active and self.is_silero_speech_active
    def _set_state(self, new_state):
        """
        Update the current state of the recorder and execute corresponding state-change callbacks.

        Args:
            new_state (str): The new state to set.
        """
        # Check if the state has actually changed
        if new_state == self.state:
            return

        # Store the current state for later comparison
        old_state = self.state

        # Update to the new state
        self.state = new_state

        # Execute callbacks based on transitioning FROM a particular state
        if old_state == "listening":
            if self.on_vad_detect_stop:
                self.on_vad_detect_stop()
        elif old_state == "wakeword":
            if self.on_wakeword_detection_end:
                self.on_wakeword_detection_end()

        # Execute callbacks based on transitioning TO a particular state
        if new_state == "listening":
            if self.on_vad_detect_start:
                self.on_vad_detect_start()
            self._set_spinner("speak now")
            if self.spinner:
                self.halo._interval = 250
        elif new_state == "wakeword":
            if self.on_wakeword_detection_start:
                self.on_wakeword_detection_start()
            self._set_spinner(f"say {self.wake_words}")
            if self.spinner:
                self.halo._interval = 500
        elif new_state == "transcribing":
            if self.on_transcription_start:
                self.on_transcription_start()
            self._set_spinner("transcribing")
            if self.spinner:
                self.halo._interval = 50
        elif new_state == "recording":
            self._set_spinner("recording")
            if self.spinner:
                self.halo._interval = 100
        elif new_state == "inactive":
            if self.spinner and self.halo:
                self.halo.stop()
                self.halo = None
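    # Summary of the state → spinner mapping set above:
    #   listening     "speak now"          250 ms refresh
    #   wakeword      "say <wake_words>"   500 ms refresh
    #   transcribing  "transcribing"        50 ms refresh
    #   recording     "recording"          100 ms refresh
    #   inactive      spinner stopped and discarded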
    def _set_spinner(self, text):
        """
        Update the spinner's text or create a new spinner with the provided text.

        Args:
            text (str): The text to be displayed alongside the spinner.
        """
        if self.spinner:
            # If the Halo spinner doesn't exist, create and start it
            if self.halo is None:
                self.halo = halo.Halo(text=text)
                self.halo.start()
            # If the Halo spinner already exists, just update the text
            else:
                self.halo.text = text
    def _recording_worker(self):
        """
        The main worker method which constantly monitors the audio input for voice activity and accordingly starts/stops the recording.
        """
        logging.debug('Starting recording worker')

        try:
            was_recording = False
            delay_was_passed = False

            # Continuously monitor audio for voice activity
            while self.is_running:

                try:
                    data = self.stream.read(self.buffer_size)

                except OSError as e:
                    if e.errno == pyaudio.paInputOverflowed:
                        logging.warning("Input overflowed. Frame dropped.")
                    else:
                        logging.error(f"Error during recording: {e}")
                        tb_str = traceback.format_exc()
                        print(f"Traceback: {tb_str}")
                        print(f"Error: {e}")
                    continue

                except Exception as e:
                    logging.error(f"Error during recording: {e}")
                    time.sleep(1)
                    tb_str = traceback.format_exc()
                    print(f"Traceback: {tb_str}")
                    print(f"Error: {e}")
                    continue

                if not self.is_recording:
                    # Handle the not-recording state

                    time_since_listen_start = time.time() - self.listen_start if self.listen_start else 0
                    wake_word_activation_delay_passed = (time_since_listen_start > self.wake_word_activation_delay)

                    # Handle wake-word timeout callback
                    if wake_word_activation_delay_passed and not delay_was_passed:
                        if self.wake_words and self.wake_word_activation_delay:
                            if self.on_wakeword_timeout:
                                self.on_wakeword_timeout()
                    delay_was_passed = wake_word_activation_delay_passed

                    # Set state and spinner text
                    if not self.recording_stop_time:
                        if self.wake_words and wake_word_activation_delay_passed and not self.wakeword_detected:
                            self._set_state("wakeword")
                        else:
                            if self.listen_start:
                                self._set_state("listening")
                            else:
                                self._set_state("inactive")

                    # Detect wake words if applicable
                    if self.wake_words and wake_word_activation_delay_passed:
                        try:
                            pcm = struct.unpack_from("h" * self.buffer_size, data)
                            wakeword_index = self.porcupine.process(pcm)

                        except struct.error:
                            logging.error("Error unpacking audio data for wake word processing.")
                            continue

                        except Exception as e:
                            logging.error(f"Wake word processing error: {e}")
                            continue

                        # If a wake word is detected
                        if wakeword_index >= 0:

                            # Remove the wake word from the recording
                            samples_for_0_1_sec = int(self.sample_rate * 0.1)
                            start_index = max(0, len(self.audio_buffer) - samples_for_0_1_sec)
                            temp_samples = collections.deque(itertools.islice(self.audio_buffer, start_index, None))
                            self.audio_buffer.clear()
                            self.audio_buffer.extend(temp_samples)

                            self.wake_word_detect_time = time.time()
                            self.wakeword_detected = True
                            if self.on_wakeword_detected:
                                self.on_wakeword_detected()

                    # Check for voice activity to trigger the start of recording
                    if ((not self.wake_words or not wake_word_activation_delay_passed) and self.start_recording_on_voice_activity) or self.wakeword_detected:

                        if self._is_voice_active():
                            logging.info("voice activity detected")

                            self.start()

                            if self.is_recording:
                                self.start_recording_on_voice_activity = False

                                # Add the buffered audio to the recording frames
                                self.frames.extend(list(self.audio_buffer))
                                self.audio_buffer.clear()

                            self.silero_vad_model.reset_states()
                        else:
                            data_copy = data[:]
                            self._check_voice_activity(data_copy)

                    self.speech_end_silence_start = 0

                else:
                    # If we are currently recording

                    # Stop the recording if silence is detected after speech
                    if self.stop_recording_on_voice_deactivity:

                        if not self._is_webrtc_speech(data, True):
                            # Voice deactivity was detected, so we start measuring silence time before stopping recording
                            if self.speech_end_silence_start == 0:
                                self.speech_end_silence_start = time.time()
                        else:
                            self.speech_end_silence_start = 0

                        # Wait for silence to stop recording after speech
                        if self.speech_end_silence_start and time.time() - self.speech_end_silence_start > self.post_speech_silence_duration:
                            logging.info("voice deactivity detected")
                            self.stop()

                if not self.is_recording and was_recording:
                    # Reset after stopping recording to ensure a clean state
                    self.stop_recording_on_voice_deactivity = False

                if time.time() - self.silero_check_time > 0.1:
                    self.silero_check_time = 0

                if self.wake_word_detect_time and time.time() - self.wake_word_detect_time > self.wake_word_timeout:
                    self.wake_word_detect_time = 0
                    if self.wakeword_detected and self.on_wakeword_timeout:
                        self.on_wakeword_timeout()
                    self.wakeword_detected = False

                if self.is_recording:
                    self.frames.append(data)

                if not self.is_recording or self.speech_end_silence_start:
                    self.audio_buffer.append(data)

                was_recording = self.is_recording
                time.sleep(TIME_SLEEP)

        except Exception as e:
            logging.error(f"Unhandled exception in _recording_worker: {e}")
            raise
    def _preprocess_output(self, text, preview=False):
        """
        Preprocesses the output text by removing any leading or trailing whitespace,
        converting all whitespace sequences to a single space character, and capitalizing
        the first character of the text.

        Args:
            text (str): The text to be preprocessed.

        Returns:
            str: The preprocessed text.
        """
        text = re.sub(r'\s+', ' ', text.strip())

        if self.ensure_sentence_starting_uppercase:
            if text:
                text = text[0].upper() + text[1:]

        # Ensure the text ends with a proper punctuation if it ends with an alphanumeric character
        if not preview:
            if self.ensure_sentence_ends_with_period:
                if text and text[-1].isalnum():
                    text += '.'

        return text
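    # Example (illustrative): with both `ensure_*` options enabled,
    # `_preprocess_output("  hello   world ")` returns "Hello world."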
    def find_tail_match_in_text(self, text1, text2, length_of_match=10):
        """
        Find the position where the last 'n' characters of text1 match a substring in text2.

        This method takes two texts, extracts the last 'n' characters from text1 (where 'n' is determined
        by the variable 'length_of_match'), and searches for an occurrence of this substring in text2,
        starting from the end of text2 and moving towards the beginning.

        Parameters:
        - text1 (str): The text containing the substring that we want to find in text2.
        - text2 (str): The text in which we want to find the matching substring.
        - length_of_match (int): The length of the matching string that we are looking for.

        Returns:
            int: The position (0-based index) in text2 right after the matching substring ends,
            suitable for slicing off the fresh tail with text2[position:].
            If no match is found or either of the texts is too short, returns -1.
        """
        # Check if either of the texts is too short
        if len(text1) < length_of_match or len(text2) < length_of_match:
            return -1

        # The end portion of the first text that we want to compare
        target_substring = text1[-length_of_match:]

        # Loop through text2 from right to left
        for i in range(len(text2) - length_of_match + 1):
            # Extract the substring from text2 to compare with the target_substring
            current_substring = text2[len(text2) - i - length_of_match:len(text2) - i]

            # Compare the current_substring with the target_substring
            if current_substring == target_substring:
                return len(text2) - i  # Position in text2 right after the match ends

        return -1
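    # Worked example (illustrative): find_tail_match_in_text("hello world",
    # "say hello world again") takes the last 10 characters of text1,
    # "ello world", finds them at text2[5:15], and returns 15, so
    # text2[15:] == " again" is the freshly transcribed tail.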
    def _realtime_worker(self):
        """
        Performs real-time transcription if the feature is enabled.

        The method is responsible for transcribing recorded audio frames in real-time
        based on the specified resolution interval.
        The transcribed text is stored in `self.realtime_transcription_text` and a callback
        function is invoked with this text if specified.
        """
        try:
            logging.debug('Starting realtime worker')

            # Return immediately if real-time transcription is not enabled
            if not self.enable_realtime_transcription:
                return

            # Continue running as long as the main process is active
            while self.is_running:

                # Check if the recording is active
                if self.is_recording:

                    # Sleep for the duration of the transcription resolution
                    time.sleep(self.realtime_processing_pause)

                    # Convert the buffer frames to a NumPy array
                    audio_array = np.frombuffer(b''.join(self.frames), dtype=np.int16)

                    # Normalize the array to a [-1, 1] range
                    audio_array = audio_array.astype(np.float32) / INT16_MAX_ABS_VALUE

                    # Perform transcription and assemble the text
                    segments = self.realtime_model_type.transcribe(
                        audio_array,
                        language=self.language if self.language else None
                    )

                    # Double-check the recording state because it could have changed mid-transcription
                    if self.is_recording and time.time() - self.recording_start_time > 0.5:
                        logging.debug('Starting realtime transcription')
                        self.realtime_transcription_text = " ".join(seg.text for seg in segments[0]).strip()

                        self.text_storage.append(self.realtime_transcription_text)

                        # Take the last two texts in storage, if they exist
                        if len(self.text_storage) >= 2:
                            last_two_texts = self.text_storage[-2:]

                            # Find the longest common prefix between the two texts
                            prefix = os.path.commonprefix([last_two_texts[0], last_two_texts[1]])

                            # This prefix is the text that was transcribed the same way twice in a row,
                            # so we store it as "safely detected text"
                            if len(prefix) >= len(self.realtime_stabilized_safetext):
                                # Only store when longer than the previous one, as additional security
                                self.realtime_stabilized_safetext = prefix

                        # Find parts of the stabilized text in the freshly transcribed text
                        matching_position = self.find_tail_match_in_text(self.realtime_stabilized_safetext, self.realtime_transcription_text)
                        if matching_position < 0:
                            if self.realtime_stabilized_safetext:
                                if self.on_realtime_transcription_stabilized:
                                    self.on_realtime_transcription_stabilized(self._preprocess_output(self.realtime_stabilized_safetext, True))
                            else:
                                if self.on_realtime_transcription_stabilized:
                                    self.on_realtime_transcription_stabilized(self._preprocess_output(self.realtime_transcription_text, True))
                        else:
                            # We found parts of the stabilized text in the transcribed text,
                            # so we take the stabilized text and add only the freshly transcribed part to it
                            output_text = self.realtime_stabilized_safetext + self.realtime_transcription_text[matching_position:]

                            # This yields us the "left" text part as stabilized AND at the same time delivers
                            # freshly detected parts on the first run, without the need for two transcriptions
                            if self.on_realtime_transcription_stabilized:
                                self.on_realtime_transcription_stabilized(self._preprocess_output(output_text, True))

                        # Invoke the callback with the transcribed text
                        if self.on_realtime_transcription_update:
                            self.on_realtime_transcription_update(self._preprocess_output(self.realtime_transcription_text, True))

                # If not recording, sleep briefly before checking again
                else:
                    time.sleep(TIME_SLEEP)

        except Exception as e:
            logging.error(f"Unhandled exception in _realtime_worker: {e}")
            raise
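    # Example (illustrative) of the stabilization logic above: if two
    # consecutive realtime transcriptions are "The quick brown" and
    # "The quick brown fox", their common prefix "The quick brown" is kept
    # as safe text, and only the freshly transcribed tail " fox" is appended
    # to it when reporting stabilized output.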
    def __del__(self):
        """
        Destructor method ensures a safe shutdown of the recorder when the instance is destroyed.
        """
        self.shutdown()
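

# Minimal demo sketch (assumes a working microphone; the model weights are
# downloaded from the Hugging Face Hub on first use):
if __name__ == '__main__':
    recorder = AudioToTextRecorder()
    try:
        print("Transcription:", recorder.text())
    finally:
        recorder.shutdown()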