@@ -201,8 +201,10 @@ export function FreeFlow(){
             sendOsc(OSC_ADDRESS.COUNTDOWN, '0'); // Reset countdown for non-chat cues
         }
 
+        console.log('~~~~ clear pause timer');
         if(refPauseTimer.current) clearTimeout(refPauseTimer.current);
-        refSpeechPaused.current=false;
+        // refSpeechPaused.current=false;
     }
 
     function onCueEnd() {
@@ -264,13 +266,16 @@ export function FreeFlow(){
     function onSpeechEnd(){
 
         if(currentCue?.type!='chat') return; // Only process if current cue is user input
         if(chatStatus!=ChatStatus.User) return; // Only process if chat status is User
 
-        console.log('on speech end, start timer');
-        refSpeechPaused.current=true;
+        console.log('~~~ on speech end, start pause timer',data.speech_idle_time);
+        // refSpeechPaused.current=true;
 
         if(refPauseTimer.current) clearTimeout(refPauseTimer.current);
         refPauseTimer.current=setTimeout(()=>{
-            if(refSpeechPaused.current) processSpeech();
+            console.log('~~~ pause timer ended, process speech');
+            // if(refSpeechPaused.current)
+            processSpeech();
         }, data.speech_idle_time);
 
     }
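
The hunk above amounts to a debounce on the speech input: every time speech ends, the idle timer is restarted, and processSpeech() only fires if nothing resets it within data.speech_idle_time. A minimal sketch of that pattern in isolation (hook and parameter names here are illustrative, not the project's actual exports):

```js
import { useRef } from 'react';

// Illustrative debounce hook; `onIdle` and `idleMs` stand in for
// processSpeech and data.speech_idle_time from the component above.
function useIdleTimer(onIdle, idleMs) {
    const refPauseTimer = useRef(null);

    // (Re)start the countdown, e.g. when a final transcript arrives.
    const restart = () => {
        if (refPauseTimer.current) clearTimeout(refPauseTimer.current);
        refPauseTimer.current = setTimeout(onIdle, idleMs);
    };

    // Cancel the countdown, e.g. when new interim speech arrives.
    const cancel = () => {
        if (refPauseTimer.current) clearTimeout(refPauseTimer.current);
    };

    return { restart, cancel };
}
```

Keeping the timeout id in a ref rather than in state avoids extra re-renders and lets any effect or handler cancel it synchronously, which is why the component stores it in refPauseTimer.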
@@ -297,24 +302,41 @@ export function FreeFlow(){
     }
 
     useEffect(()=>{
-        onSpeechEnd(); // Call onSpeechEnd when finalTranscript changes
+        console.log('Final transcript changed:', finalTranscript);
+        if(finalTranscript.trim().length > 0) {
+            onSpeechEnd();
+        }
     },[finalTranscript]);
 
+    function startRecognition() {
+        SpeechRecognition.startListening({ continuous: true, language: 'zh-TW' }).then(() => {
+            console.log("Speech recognition started.");
+        }).catch(error => {
+            console.error("Error starting speech recognition:", error);
+        });
+    }
+
     useEffect(()=>{
         if(audioInput && isMicrophoneAvailable) {
-            SpeechRecognition.startListening({ continuous: true, language: 'zh-TW' }).then(() => {
-                console.log("Speech recognition started.");
-            }).catch(error => {
-                console.error("Error starting speech recognition:", error);
-            });
+            startRecognition();
 
             const recognition= SpeechRecognition.getRecognition();
 
             recognition.onspeechstart=(e)=>{
-                console.log('Sound start:', e);
-                refSpeechPaused.current=false;
+                console.log('Speech start:', e);
             };
+            // recognition.onspeechend=(e)=>{
+            //     console.log('Speech end:', e);
+            //     startRecognition();
+            // };
         }else{
             console.log('Stopping speech recognition...');
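
The new startRecognition() helper wraps react-speech-recognition's promise-based startListening call so it can be reused (for example from the commented-out onspeechend handler). A standalone sketch of that start/stop wiring, assuming only the library's documented startListening, stopListening and getRecognition calls, with `enabled` standing in for the component's audioInput flag:

```js
import { useEffect } from 'react';
import SpeechRecognition, { useSpeechRecognition } from 'react-speech-recognition';

// Illustrative hook, not part of this PR: `enabled` stands in for audioInput.
function useRecognitionToggle(enabled) {
    const { isMicrophoneAvailable } = useSpeechRecognition();

    useEffect(() => {
        if (enabled && isMicrophoneAvailable) {
            SpeechRecognition.startListening({ continuous: true, language: 'zh-TW' })
                .then(() => console.log('Speech recognition started.'))
                .catch((error) => console.error('Error starting speech recognition:', error));

            // Optionally reach the underlying Web Speech API object for raw events.
            const recognition = SpeechRecognition.getRecognition();
            if (recognition) {
                recognition.onspeechstart = (e) => console.log('Speech start:', e);
            }
        } else {
            SpeechRecognition.stopListening();
        }
    }, [enabled, isMicrophoneAvailable]);
}
```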
@@ -326,11 +348,21 @@ export function FreeFlow(){
     useEffect(()=>{
-        // if(listening){
-        if((currentCue?.type=='chat' && chatStatus==ChatStatus.User) || currentCue?.type=='user_input') {
+        if((currentCue?.type=='chat' && chatStatus==ChatStatus.User) || currentCue?.type=='user_input') {
+            // console.log('transcript state changed:', transcript);
+            if(transcript!=finalTranscript){
                 refInput.current.value = transcript;
 
+                // clear pause timer
+                // console.log('~~~~ clear pause timer');
+                if(refPauseTimer.current) clearTimeout(refPauseTimer.current);
+                refSpeechPaused.current=false;
+            }
-        // }
         }
     },[transcript]);
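
Taken together with the onSpeechEnd hunk, this gives a two-effect silence-detection flow: the [transcript] effect mirrors interim text into the input and cancels any pending idle callback, while the [finalTranscript] effect arms the countdown that eventually calls processSpeech(). A compressed sketch of that flow, where `onIdle` and `idleMs` are placeholders for processSpeech and data.speech_idle_time:

```js
import { useEffect, useRef } from 'react';
import { useSpeechRecognition } from 'react-speech-recognition';

// Compressed sketch of the silence-detection flow spread across the two
// effects above; not the component's actual implementation.
function useSilenceDetection(onIdle, idleMs) {
    const { transcript, finalTranscript } = useSpeechRecognition();
    const refPauseTimer = useRef(null);

    // Interim speech activity cancels any pending idle callback.
    useEffect(() => {
        if (transcript !== finalTranscript && refPauseTimer.current) {
            clearTimeout(refPauseTimer.current);
        }
    }, [transcript, finalTranscript]);

    // A non-empty final transcript (re)starts the idle countdown.
    useEffect(() => {
        if (finalTranscript.trim().length === 0) return;
        if (refPauseTimer.current) clearTimeout(refPauseTimer.current);
        refPauseTimer.current = setTimeout(onIdle, idleMs);
    }, [finalTranscript, onIdle, idleMs]);
}
```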