Process freezes after 9-16 loops of VACOnlineASRProcessor
Greetings, I'm a fellow developer. I modified the script so that it can be run in the terminal, and I added support for the Groq API (that part is working fine, you don't have to worry about it). I first noticed that when using the demo on the web app, the VACOnlineASRProcessor loop that processes ~1-second chunks was freezing after 9-12 iterations. I then recreated the script as a terminal-based script, but the same issue persists. Using some print statements I have narrowed the issue down to the VACOnlineASRProcessor class, so could you please look into this? My modified scripts are below:
whisper_online_terminal.py
import numpy as np
import sounddevice as sd
import ffmpeg
import threading

from whisper_online import asr_factory, VACOnlineASRProcessor

debug_num = 0

# Constants
SAMPLE_RATE = 16000
CHANNELS = 1
CHUNK_DURATION = 0.04  # in seconds
CHUNK_SIZE = int(SAMPLE_RATE * CHUNK_DURATION)
BYTES_PER_SAMPLE = 2
BYTES_PER_SEC = SAMPLE_RATE * BYTES_PER_SAMPLE

# Initialize ASR with VAC.
# asr_factory already returns a configured VACOnlineASRProcessor, so we reuse it
# instead of constructing a second one (which would load the silero VAD model twice).
asr, online = asr_factory()
vac_processor = online

# Function to process chunks asynchronously
def process_audio_chunks():
    pcm_buffer = bytearray()

    def handle_chunk(chunk):
        global debug_num
        try:
            pcm_buffer.extend(chunk)
            while len(pcm_buffer) >= BYTES_PER_SEC:  # process chunks of 1 second
                debug_num += 1
                print(debug_num, "Processing chunk...")
                audio_chunk = pcm_buffer[:BYTES_PER_SEC]
                del pcm_buffer[:BYTES_PER_SEC]
                # Convert int16 -> float32 for processing
                pcm_array = np.frombuffer(audio_chunk, dtype=np.int16).astype(np.float32) / 32768.0
                print("(sending to vac) Processing chunk...")
                vac_processor.insert_audio_chunk(pcm_array)
                print("(sending to vac) Processing complete.")
                # Fetch results
                transcription = vac_processor.process_iter()[2]
                if transcription:
                    print("Transcription:", transcription)
                print(debug_num, "Processed chunk.")
        except Exception as e:
            print("Error handling audio chunk:", e)

    return handle_chunk

# Function to start the FFmpeg process
def start_ffmpeg_decoder():
    return (
        ffmpeg.input('pipe:0', format='s16le', ac=CHANNELS, ar=str(SAMPLE_RATE))
        .output('pipe:1', format='s16le', acodec='pcm_s16le', ac=CHANNELS, ar=str(SAMPLE_RATE))
        .run_async(pipe_stdin=True, pipe_stdout=True, pipe_stderr=True)
    )

# Function to handle FFmpeg stdout
def handle_ffmpeg_stdout(ffmpeg_process, handle_chunk):
    while True:
        try:
            chunk = ffmpeg_process.stdout.read(4096)  # read audio data from FFmpeg
            if not chunk:
                break
            handle_chunk(chunk)
        except Exception as e:
            print("Error reading from FFmpeg stdout:", e)
            break

# Start audio capture
try:
    ffmpeg_process = start_ffmpeg_decoder()

    # Drain FFmpeg's stderr in the background: with pipe_stderr=True and no
    # reader, the pipe buffer can fill up and block the FFmpeg process.
    threading.Thread(target=ffmpeg_process.stderr.read, daemon=True).start()

    # Start FFmpeg stdout reader thread
    handle_chunk = process_audio_chunks()
    stdout_thread = threading.Thread(target=handle_ffmpeg_stdout, args=(ffmpeg_process, handle_chunk))
    stdout_thread.start()

    # Stream microphone audio to FFmpeg stdin
    with sd.InputStream(samplerate=SAMPLE_RATE, channels=CHANNELS, dtype='int16') as stream:
        print("Listening... Press Ctrl+C to stop.")
        while True:
            indata, _ = stream.read(CHUNK_SIZE)
            ffmpeg_process.stdin.write(indata.tobytes())
except KeyboardInterrupt:
    print("Exiting application.")
finally:
    # Clean up resources
    ffmpeg_process.stdin.close()
    ffmpeg_process.stdout.close()
    ffmpeg_process.wait()
    print("Application stopped.")
whisper_online.py
#!/usr/bin/env python3
import re
import sys
import numpy as np
import librosa
from functools import lru_cache
import time
import logging
import io
import soundfile as sf
import math

logger = logging.getLogger(__name__)

min_chunk_size = 1.0
vad = True
vac = True
vac_chunk_size = 0.04
task = "transcribe"
buffer_trimming = "segment"
buffer_trimming_sec: float = 15
log_level = 'DEBUG'

@lru_cache(10**6)
def load_audio(fname):
    a, _ = librosa.load(fname, sr=16000, dtype=np.float32)
    return a

def load_audio_chunk(fname, beg, end):
    audio = load_audio(fname)
    beg_s = int(beg*16000)
    end_s = int(end*16000)
    return audio[beg_s:end_s]

# Whisper backend
class ASRBase:

    sep = " "   # join transcribed words with this character (" " for whisper_timestamped,
                # "" for faster-whisper because it emits the spaces when needed)

    def __init__(self, lan, modelsize=None, cache_dir=None, model_dir=None, logfile=sys.stderr):
        self.logfile = logfile
        self.transcribe_kargs = {}
        if lan == "auto":
            self.original_language = None
        else:
            self.original_language = lan
        self.model = self.load_model(modelsize, cache_dir, model_dir)

    def load_model(self, modelsize, cache_dir, model_dir):
        raise NotImplementedError("must be implemented in the child class")

    def transcribe(self, audio, init_prompt=""):
        raise NotImplementedError("must be implemented in the child class")

    def use_vad(self):
        raise NotImplementedError("must be implemented in the child class")
class GroqApiASR(ASRBase):
    """Uses Groq's Whisper API for audio transcription."""

    def __init__(self, lan=None, temperature=0, logfile=sys.stderr):
        self.logfile = logfile
        self.modelname = "whisper-large-v3"
        self.original_language = None if lan == "auto" else lan  # ISO-639-1 language code
        self.response_format = "verbose_json"
        self.temperature = temperature
        self.load_model()
        self.use_vad_opt = False
        # reset the task in set_translate_task
        self.task = "transcribe"

    def load_model(self, *args, **kwargs):
        from groq import Groq
        self.client = Groq(
            api_key="API_KEY"
        )
        self.transcribed_seconds = 0  # for logging how many seconds were processed by the API, to know the cost

    def ts_words(self, segments):
        print(segments)
        no_speech_segments = []
        if self.use_vad_opt:
            for segment in segments.segments:
                # TODO: threshold can be set from outside
                if segment["no_speech_prob"] > 0.8:
                    no_speech_segments.append((segment.get("start"), segment.get("end")))
        o = []
        for word in segments.segments[0]['words']:
            start = word['start']
            end = word['end']
            if any(s[0] <= start <= s[1] for s in no_speech_segments):
                print("Skipping word", word.get("word"), "because it's in a no-speech segment")
                continue
            o.append((start, end, word['word']))
        return o

    def segments_end_ts(self, res):
        # 'words' (not 'word'): that is the key set in format_transcription_obj
        return [word['end'] for word in res.segments[0]['words']]

    def format_transcription_obj(self, transcript_obj):
        from groq.types.audio.transcription import Transcription
        # Regex to match words along with their following spaces or punctuation
        tokens = re.findall(r'(\S+[\s.,;!?]*)', transcript_obj.text)
        transcript_obj.segments[0]['words'] = [
            {'word': token, 'start': transcript_obj.segments[0]['start'], 'end': transcript_obj.segments[0]['end']}
            for token in tokens
        ]
        return Transcription(
            text=transcript_obj.text,
            task=transcript_obj.task,
            language=transcript_obj.language,
            duration=transcript_obj.duration,
            segments=transcript_obj.segments,
            x_groq=transcript_obj.x_groq,
        )

    def transcribe(self, audio_data, prompt=None, *args, **kwargs):
        # Write the audio data to an in-memory WAV buffer
        print("Transcribing")
        buffer = io.BytesIO()
        buffer.name = "temp.wav"
        sf.write(buffer, audio_data, samplerate=16000, format='WAV', subtype='PCM_16')
        buffer.seek(0)  # reset the buffer's position to the beginning
        self.transcribed_seconds += math.ceil(len(audio_data)/16000)  # rounds up to whole seconds
        params = {
            "model": self.modelname,
            "file": buffer,
            "response_format": self.response_format,
            "temperature": self.temperature,
            "timestamp_granularities": ["word", "segment"]
        }
        if self.task != "translate" and self.original_language:
            params["language"] = self.original_language
        if prompt:
            params["prompt"] = prompt
        if self.task == "translate":
            # the translations endpoint does not accept language/timestamp parameters
            params.pop("language", None)
            params.pop("timestamp_granularities", None)
            transcript = self.client.audio.translations.create(**params)
        else:
            transcript = self.client.audio.transcriptions.create(**params)
        # Process transcription/translation
        logger.debug(f"Groq API processed accumulated {self.transcribed_seconds} seconds")
        print(f"Groq API processed accumulated {self.transcribed_seconds} seconds")
        transcript = self.format_transcription_obj(transcript)
        print(transcript)
        return transcript

    def use_vad(self):
        self.use_vad_opt = True

    def set_translate_task(self):
        self.task = "translate"
class HypothesisBuffer:

    def __init__(self, logfile=sys.stderr):
        self.commited_in_buffer = []
        self.buffer = []
        self.new = []
        self.last_commited_time = 0
        self.last_commited_word = None
        self.logfile = logfile

    def insert(self, new, offset):
        # compare self.commited_in_buffer and new. It inserts only the words in new that extend commited_in_buffer,
        # i.e. they are roughly behind last_commited_time and new in content.
        # The new tail is added to self.new.
        new = [(a+offset, b+offset, t) for a, b, t in new]
        self.new = [(a, b, t) for a, b, t in new if a > self.last_commited_time-0.1]

        if len(self.new) >= 1:
            a, b, t = self.new[0]
            if abs(a - self.last_commited_time) < 1:
                if self.commited_in_buffer:
                    # it's going to search for 1, 2, ..., 5 consecutive words (n-grams) that are identical in commited and new. If they are, they're dropped.
                    cn = len(self.commited_in_buffer)
                    nn = len(self.new)
                    for i in range(1, min(min(cn, nn), 5)+1):  # 5 is the maximum
                        c = " ".join([self.commited_in_buffer[-j][2] for j in range(1, i+1)][::-1])
                        tail = " ".join(self.new[j-1][2] for j in range(1, i+1))
                        if c == tail:
                            words = []
                            for j in range(i):
                                words.append(repr(self.new.pop(0)))
                            words_msg = " ".join(words)
                            logger.debug(f"removing last {i} words: {words_msg}")
                            break

    def flush(self):
        # returns the commited chunk = the longest common prefix of the 2 last inserts.
        commit = []
        while self.new:
            na, nb, nt = self.new[0]
            if len(self.buffer) == 0:
                break
            if nt == self.buffer[0][2]:
                commit.append((na, nb, nt))
                self.last_commited_word = nt
                self.last_commited_time = nb
                self.buffer.pop(0)
                self.new.pop(0)
            else:
                break
        self.buffer = self.new
        self.new = []
        self.commited_in_buffer.extend(commit)
        return commit

    def pop_commited(self, time):
        while self.commited_in_buffer and self.commited_in_buffer[0][1] <= time:
            self.commited_in_buffer.pop(0)

    def complete(self):
        return self.buffer
class OnlineASRProcessor:

    SAMPLING_RATE = 16000

    def __init__(self, asr, tokenizer=None, buffer_trimming=("segment", 15), logfile=sys.stderr):
        """asr: WhisperASR object
        tokenizer: sentence tokenizer object for the target language. Must have a method *split* that behaves like the one of MosesTokenizer. It can be None if the "segment" buffer trimming option is used; then the tokenizer is not used at all.
        buffer_trimming: a pair (option, seconds), where option is either "sentence" or "segment", and seconds is a number. The buffer is trimmed if it is longer than the "seconds" threshold. The default ("segment", 15) is the most recommended option.
        logfile: where to store the log.
        """
        self.asr = asr
        self.tokenizer = tokenizer
        self.logfile = logfile

        self.init()

        self.buffer_trimming_way, self.buffer_trimming_sec = buffer_trimming

    def init(self, offset=None):
        """run this when starting or restarting processing"""
        self.audio_buffer = np.array([], dtype=np.float32)
        self.transcript_buffer = HypothesisBuffer(logfile=self.logfile)
        self.buffer_time_offset = 0
        if offset is not None:
            self.buffer_time_offset = offset
        self.transcript_buffer.last_commited_time = self.buffer_time_offset
        self.commited = []

    def insert_audio_chunk(self, audio):
        self.audio_buffer = np.append(self.audio_buffer, audio)

    def prompt(self):
        """Returns a tuple (prompt, context), where "prompt" is a 200-character suffix of commited text that is inside the scrolled-away part of the audio buffer.
        "context" is the commited text that is inside the audio buffer. It is transcribed again and skipped. It is returned only for debugging and logging reasons.
        """
        k = max(0, len(self.commited)-1)
        while k > 0 and self.commited[k-1][1] > self.buffer_time_offset:
            k -= 1

        p = self.commited[:k]
        p = [t for _, _, t in p]
        prompt = []
        l = 0
        while p and l < 200:  # 200 characters prompt size
            x = p.pop(-1)
            l += len(x)+1
            prompt.append(x)
        non_prompt = self.commited[k:]
        return self.asr.sep.join(prompt[::-1]), self.asr.sep.join(t for _, _, t in non_prompt)

    def process_iter(self):
        """Runs on the current audio buffer.
        Returns: a tuple (beg_timestamp, end_timestamp, "text"), or (None, None, "").
        The non-empty text is the confirmed (committed) partial transcript.
        """
        prompt, non_prompt = self.prompt()
        logger.debug(f"PROMPT: {prompt}")
        logger.debug(f"CONTEXT: {non_prompt}")
        logger.debug(f"transcribing {len(self.audio_buffer)/self.SAMPLING_RATE:2.2f} seconds from {self.buffer_time_offset:2.2f}")
        res = self.asr.transcribe(self.audio_buffer, init_prompt=prompt)

        # transform to [(beg,end,"word1"), ...]
        tsw = self.asr.ts_words(res)

        self.transcript_buffer.insert(tsw, self.buffer_time_offset)
        o = self.transcript_buffer.flush()
        self.commited.extend(o)
        completed = self.to_flush(o)
        logger.debug(f">>>>COMPLETE NOW: {completed}")
        the_rest = self.to_flush(self.transcript_buffer.complete())
        logger.debug(f"INCOMPLETE: {the_rest}")

        # there is newly confirmed text
        if o and self.buffer_trimming_way == "sentence":  # trim the completed sentences
            if len(self.audio_buffer)/self.SAMPLING_RATE > self.buffer_trimming_sec:  # longer than this
                self.chunk_completed_sentence()

        if self.buffer_trimming_way == "segment":
            s = self.buffer_trimming_sec  # trim the completed segments longer than s
        else:
            s = 30  # if the audio buffer is longer than 30s, trim it

        if len(self.audio_buffer)/self.SAMPLING_RATE > s:
            self.chunk_completed_segment(res)

            # alternative: on any word
            #l = self.buffer_time_offset + len(self.audio_buffer)/self.SAMPLING_RATE - 10
            # let's find the commited word that is less
            #k = len(self.commited)-1
            #while k>0 and self.commited[k][1] > l:
            #    k -= 1
            #t = self.commited[k][1]
            logger.debug("chunking segment")
            #self.chunk_at(t)

        logger.debug(f"len of buffer now: {len(self.audio_buffer)/self.SAMPLING_RATE:2.2f}")
        return self.to_flush(o)

    def chunk_completed_sentence(self):
        if self.commited == []:
            return
        logger.debug(self.commited)
        sents = self.words_to_sentences(self.commited)
        for s in sents:
            logger.debug(f"\t\tSENT: {s}")
        if len(sents) < 2:
            return
        while len(sents) > 2:
            sents.pop(0)
        # we will continue the audio processing at this timestamp
        chunk_at = sents[-2][1]

        logger.debug(f"--- sentence chunked at {chunk_at:2.2f}")
        self.chunk_at(chunk_at)

    def chunk_completed_segment(self, res):
        if self.commited == []:
            return

        ends = self.asr.segments_end_ts(res)

        t = self.commited[-1][1]

        if len(ends) > 1:
            e = ends[-2]+self.buffer_time_offset
            while len(ends) > 2 and e > t:
                ends.pop(-1)
                e = ends[-2]+self.buffer_time_offset
            if e <= t:
                logger.debug(f"--- segment chunked at {e:2.2f}")
                self.chunk_at(e)
            else:
                logger.debug("--- last segment not within commited area")
        else:
            logger.debug("--- not enough segments to chunk")

    def chunk_at(self, time):
        """trims the hypothesis and audio buffer at "time" """
        self.transcript_buffer.pop_commited(time)
        cut_seconds = time - self.buffer_time_offset
        self.audio_buffer = self.audio_buffer[int(cut_seconds*self.SAMPLING_RATE):]
        self.buffer_time_offset = time

    def words_to_sentences(self, words):
        """Uses self.tokenizer for sentence segmentation of words.
        Returns: [(beg,end,"sentence 1"),...]
        """
        cwords = [w for w in words]
        t = " ".join(o[2] for o in cwords)
        s = self.tokenizer.split(t)
        out = []
        while s:
            beg = None
            end = None
            sent = s.pop(0).strip()
            fsent = sent
            while cwords:
                b, e, w = cwords.pop(0)
                w = w.strip()
                if beg is None and sent.startswith(w):
                    beg = b
                elif end is None and sent == w:
                    end = e
                    out.append((beg, end, fsent))
                    break
                sent = sent[len(w):].strip()
        return out

    def finish(self):
        """Flush the incomplete text when the whole processing ends.
        Returns: the same format as self.process_iter()
        """
        o = self.transcript_buffer.complete()
        f = self.to_flush(o)
        logger.debug(f"last, noncommited: {f}")
        self.buffer_time_offset += len(self.audio_buffer)/16000
        return f

    def to_flush(self, sents, sep=None, offset=0):
        # concatenates the timestamped words or sentences into one sequence that is flushed in one line
        # sents: [(beg1, end1, "sentence1"), ...] or [] if empty
        # return: (beg1, end-of-last-sentence, "concatenation of sentences") or (None, None, "") if empty
        if sep is None:
            sep = self.asr.sep
        t = sep.join(s[2] for s in sents)
        if len(sents) == 0:
            b = None
            e = None
        else:
            b = offset + sents[0][0]
            e = offset + sents[-1][1]
        return (b, e, t)
class VACOnlineASRProcessor(OnlineASRProcessor):
    '''Wraps OnlineASRProcessor with VAC (Voice Activity Controller).

    It works the same way as OnlineASRProcessor: it receives chunks of audio (e.g. 0.04 seconds),
    it runs VAD and continuously detects whether there is speech or not.
    When it detects end of speech (non-voice for 500 ms), it makes OnlineASRProcessor end the utterance immediately.
    '''

    def __init__(self, online_chunk_size, *a, **kw):
        self.online_chunk_size = online_chunk_size

        self.online = OnlineASRProcessor(*a, **kw)

        # VAC:
        import torch
        model, _ = torch.hub.load(
            repo_or_dir='snakers4/silero-vad',
            model='silero_vad'
        )
        from silero_vad_iterator import FixedVADIterator
        self.vac = FixedVADIterator(model)  # we use the default options: 500 ms silence, 100 ms padding, etc.

        self.logfile = self.online.logfile
        self.init()

    def init(self):
        self.online.init()
        self.vac.reset_states()
        self.current_online_chunk_buffer_size = 0

        self.is_currently_final = False

        self.status = None  # or "voice" or "nonvoice"
        self.audio_buffer = np.array([], dtype=np.float32)
        self.buffer_offset = 0  # in frames

    def clear_buffer(self):
        self.buffer_offset += len(self.audio_buffer)
        self.audio_buffer = np.array([], dtype=np.float32)

    def insert_audio_chunk(self, audio):
        res = self.vac(audio)
        self.audio_buffer = np.append(self.audio_buffer, audio)

        if res is not None:
            frame = list(res.values())[0] - self.buffer_offset
            if 'start' in res and 'end' not in res:
                self.status = 'voice'
                send_audio = self.audio_buffer[frame:]
                self.online.init(offset=(frame+self.buffer_offset)/self.SAMPLING_RATE)
                self.online.insert_audio_chunk(send_audio)
                self.current_online_chunk_buffer_size += len(send_audio)
                self.clear_buffer()
            elif 'end' in res and 'start' not in res:
                self.status = 'nonvoice'
                send_audio = self.audio_buffer[:frame]
                self.online.insert_audio_chunk(send_audio)
                self.current_online_chunk_buffer_size += len(send_audio)
                self.is_currently_final = True
                self.clear_buffer()
            else:
                # both the start and the end of voice are in this chunk
                beg = res["start"] - self.buffer_offset
                end = res["end"] - self.buffer_offset
                self.status = 'nonvoice'
                send_audio = self.audio_buffer[beg:end]
                self.online.init(offset=(beg+self.buffer_offset)/self.SAMPLING_RATE)
                self.online.insert_audio_chunk(send_audio)
                self.current_online_chunk_buffer_size += len(send_audio)
                self.is_currently_final = True
                self.clear_buffer()
        else:
            if self.status == 'voice':
                self.online.insert_audio_chunk(self.audio_buffer)
                self.current_online_chunk_buffer_size += len(self.audio_buffer)
                self.clear_buffer()
            else:
                # We keep 1 second because VAD may later find the start of voice in it.
                # But we trim it to prevent OOM.
                self.buffer_offset += max(0, len(self.audio_buffer)-self.SAMPLING_RATE)
                self.audio_buffer = self.audio_buffer[-self.SAMPLING_RATE:]

    def process_iter(self):
        if self.is_currently_final:
            return self.finish()
        elif self.current_online_chunk_buffer_size > self.SAMPLING_RATE*self.online_chunk_size:
            self.current_online_chunk_buffer_size = 0
            ret = self.online.process_iter()
            return ret
        else:
            print("VAD:", self.status, file=self.logfile)
            return (None, None, "")

    def finish(self):
        ret = self.online.finish()
        self.current_online_chunk_buffer_size = 0
        self.is_currently_final = False
        return ret
def asr_factory(logfile=sys.stderr):
    """
    Creates and configures an ASR and an online ASR processor instance based on the specified backend and arguments.
    """
    logger.debug("Using Groq API.")
    asr = GroqApiASR(lan="en")
    language = "en"
    if task == "translate":
        asr.set_translate_task()
        tgt_language = "en"  # Whisper translates into English
    else:
        tgt_language = language  # Whisper transcribes in this language

    # Create the tokenizer
    tokenizer = None

    # Create the OnlineASRProcessor
    if vac:
        online = VACOnlineASRProcessor(min_chunk_size, asr, tokenizer, logfile=logfile, buffer_trimming=(buffer_trimming, buffer_trimming_sec))
    else:
        online = OnlineASRProcessor(asr, tokenizer, logfile=logfile, buffer_trimming=(buffer_trimming, buffer_trimming_sec))

    return asr, online

def set_logging(logger, other="_server"):
    logging.basicConfig(  # format='%(name)s
        format='%(levelname)s\t%(message)s')
    logger.setLevel(log_level)
    logging.getLogger("whisper_online"+other).setLevel(log_level)
I cannot reproduce your issue. Speaking for several minutes with VAC activated did not cause any problems on my side. How long was the recording that caused the script to freeze?
Around 15-20 seconds.
Would a video be of any help?
Only 16 chunks are being processed:
This may not be very helpful, but when my computer, with the client webpage connected, entered sleep mode, the same thing happened to me.
After some testing I think I found a trigger: I started and stopped the audio multiple times, and voila! The server froze.
This also happens when VAC is off, so it must be an edge case in processing silent audio.
https://pastebin.com/8tjZ6nvH
Yeah, something is really off. Is having a buffer of more than 30 seconds normal? And this is a silent recording I put through, with occasional audio in the middle.
I changed the server startup argument --vad to --vac, and the error message says I do not have torchaudio, but I have installed torchaudio. That is weird.
I think that is a conflict between the conda env and the system env: ffmpeg is called from the system, but torchaudio was not meant to be called from the system, and that causes the conflict. But hey, VAC is working!
DEBUG: Loading FFmpeg%s
DEBUG: Failed to load FFmpeg%s extension.
DEBUG: Loading FFmpeg%s
DEBUG: Failed to load FFmpeg%s extension.
DEBUG: Loading FFmpeg%s
DEBUG: Failed to load FFmpeg%s extension.
DEBUG: Loading FFmpeg%s
DEBUG: Failed to load FFmpeg%s extension.
Online loaded.
INFO: connection open
no online update, only VAD None
no online update, only VAD None
no online update, only VAD None
no online update, only VAD None
no online update, only VAD None
no online update, only VAD None
no online update, only VAD None
no online update, only VAD None
WebSocket connection closed.
INFO: connection closed
The --vad option did not invoke the use of VAD, but the --vac option did, which is strange. Also, I had to install ffmpeg with conda as per the instructions in the torch documentation, not pip. And I had to remove the shebang line from whisper_online.py.
@taken-yjyoon
So did you fix the issue?
The --vad option did not invoke the use of VAD, but the --vac option did, which is strange.
VAD is handled internally by the whisper backend you are using (if the backend has one; mlx-whisper doesn't, for example). VAC is handled via the https://github.com/snakers4/silero-vad implementation, before the whisper transcription method is called, so it is active regardless of the backend you are using.
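To make the distinction concrete, here is a minimal sketch (my own illustration, not the project's code) of the VAC idea: silero-vad watches the stream, and only when it reports end of speech is the buffered audio handed to whatever transcription backend is configured. It assumes 512-sample windows at 16 kHz, which is what the plain silero VADIterator expects; the repo's FixedVADIterator wraps it to accept arbitrary chunk sizes.

import numpy as np
import torch

# torch.hub returns the silero-vad model plus a tuple of helper utilities
model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad', model='silero_vad')
get_speech_timestamps, save_audio, read_audio, VADIterator, collect_chunks = utils

vad_iterator = VADIterator(model)  # defaults: ~500 ms of trailing silence ends an utterance
SAMPLING_RATE = 16000
speech_buffer = []

def feed_chunk(chunk: np.ndarray):
    """chunk: float32 mono audio at 16 kHz, 512 samples per call."""
    res = vad_iterator(chunk, return_seconds=True)
    speech_buffer.append(chunk)
    if res is not None and 'end' in res:
        # End of utterance detected: only here would the whisper backend be
        # called, regardless of which backend it is.
        utterance = np.concatenate(speech_buffer)
        speech_buffer.clear()
        print(f"utterance ended at {res['end']}s; "
              f"{len(utterance)/SAMPLING_RATE:.2f}s would go to transcription")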
The --vad option did not invoke the use of VAD, but the --vac option did, which is strange. Also, I had to install ffmpeg with conda as per the instructions in the torch documentation, not pip. And I had to remove the shebang line from whisper_online.py.
@taken-yjyoon
So did you fix the issue?
It seems better, but alternating between nonvoice and voice on VAD within a connection still causes the freeze, and a websocket left open for a long time (at least an hour) also causes the freeze. I am certain this is not caused by the text-generation part but by something after it (specifically, somewhere after OnlineASRProcessor.prompt()). I confirmed that by putting print() calls all over the place, and then decided to just write a new logger class.
This is the modified logger I have been using, since the default logger is silent no matter what on my side.
import inspect
import logging

class PrintLogger(logging.Logger):
    def debug(self, msg, *args, **kwargs):
        frame = inspect.currentframe().f_back
        filename = frame.f_code.co_filename
        lineno = frame.f_lineno
        super().debug(msg, *args, **kwargs)
        # apply logging's lazy %-style args, otherwise messages like "Loading FFmpeg%s" print literally
        print(f"DEBUG: {filename}:{lineno} {msg % args if args else msg}")

    def info(self, msg, *args, **kwargs):
        frame = inspect.currentframe().f_back
        filename = frame.f_code.co_filename
        lineno = frame.f_lineno
        super().info(msg, *args, **kwargs)
        print(f"INFO: {filename}:{lineno} {msg % args if args else msg}")

    def warning(self, msg, *args, **kwargs):
        frame = inspect.currentframe().f_back
        filename = frame.f_code.co_filename
        lineno = frame.f_lineno
        super().warning(msg, *args, **kwargs)
        print(f"WARNING: {filename}:{lineno} {msg % args if args else msg}")

    def error(self, msg, *args, **kwargs):
        frame = inspect.currentframe().f_back
        filename = frame.f_code.co_filename
        lineno = frame.f_lineno
        super().error(msg, *args, **kwargs)
        print(f"ERROR: {filename}:{lineno} {msg % args if args else msg}")

    def critical(self, msg, *args, **kwargs):
        frame = inspect.currentframe().f_back
        filename = frame.f_code.co_filename
        lineno = frame.f_lineno
        super().critical(msg, *args, **kwargs)
        print(f"CRITICAL: {filename}:{lineno} {msg % args if args else msg}")
Put this before the logger declaration.
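For completeness, a minimal sketch of the wiring (my own example; setLoggerClass must run before any getLogger() call creates the logger):

import logging

# Register the custom logger class, then create the module logger as usual.
logging.setLoggerClass(PrintLogger)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

logger.debug("VAC status: %s", "voice")  # also printed with file:line by PrintLogger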
The --vad option did not invoke the use of VAD, but the --vac option did, which is strange.
VAD is handled internally by the whisper backend you are using (if the backend has one; mlx-whisper doesn't, for example). VAC is handled via the https://github.com/snakers4/silero-vad implementation, before the whisper transcription method is called, so it is active regardless of the backend you are using.
Thank you for explaining it in detail; the confusion came from the -h description:
  --vac                 Use VAC = voice activity controller. Recommended. Requires torch.
  --vac-chunk-size VAC_CHUNK_SIZE
                        VAC sample size in seconds.
  --vad                 Use VAD = voice activity detection, with the default parameters.
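For reference, enabling the controller on the server would then look something like python whisper_online_server.py --vac --vac-chunk-size 0.04 (assuming the repo's whisper_online_server.py entry point), whereas --vad only asks the whisper backend to use its own built-in filtering.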