import { useEffect, useRef, useState } from 'react';
import SpeechRecognition, { useSpeechRecognition } from 'react-speech-recognition';
import { gsap } from 'gsap';
import { SplitText } from 'gsap/SplitText';
import Input from '../comps/input';
import { sendChatMessage } from '../util/chat';
import { textToSpeech } from '../util/tts';
import { useData } from '../util/useData';

gsap.registerPlugin(SplitText);

export function Conversation() {
  const { data: params } = useData();
  const [history, setHistory] = useState([]);
  const [processing, setProcessing] = useState(false);
  const [showProcessing, setShowProcessing] = useState(false);
  const [audioOutput, setAudioOutput] = useState(false);
  const [prompt, setPrompt] = useState([]);

  const refHistoryContainer = useRef(null);
  const refPromptContainer = useRef(null);
  const refInput = useRef(null);

  const {
    transcript,
    finalTranscript,
    listening,
    resetTranscript,
    browserSupportsSpeechRecognition,
    isMicrophoneAvailable,
  } = useSpeechRecognition();

  // Reset the conversation and ask the backend for a fresh opening message.
  function restart() {
    console.log('Restarting...');
    setHistory([]);
    setPrompt([]);
    refInput.current.value = '';
    resetTranscript();
    SpeechRecognition.stopListening();

    const startTime = Date.now();
    setProcessing(true);
    sendChatMessage([], params)
      .then(response => {
        if (!response.ok) {
          throw new Error('Network response was not ok');
        }
        const data = response;
        console.log('get reply:', data, new Date(Date.now() - startTime).toISOString().slice(11, 19));

        // Seed the history and prompt log with the assistant's opener.
        setHistory([{ role: 'assistant', content: data.output_text }]);
        setPrompt([data.prompt]);

        if (!audioOutput) {
          setProcessing(false);
        } else {
          // Speak the opener before clearing the processing flag.
          console.log('create speech:', data.output_text);
          textToSpeech(data.output_text, params?.voice_prompt)
            .then(audioUrl => {
              const audio = new Audio(audioUrl);
              console.log('play audio...', new Date(Date.now() - startTime).toISOString().slice(11, 19));
              audio.play().catch(error => {
                console.error('Audio playback failed:', error);
              });
              setProcessing(false);
            })
            .catch(error => {
              console.error('TTS error:', error);
              setProcessing(false);
            });
        }
      })
      .catch(error => {
        // Without this, a failed request would leave the UI stuck in "processing".
        console.error('Start message failed:', error);
        setProcessing(false);
      });
  }

  // Start or stop continuous dictation (Traditional Chinese); the transcript
  // effects below mirror recognized speech into the input and auto-submit it.
  function toggleAudio(value) {
    console.log('onclickAudio', listening, browserSupportsSpeechRecognition, isMicrophoneAvailable);
    if (!browserSupportsSpeechRecognition) {
      console.warn('Browser does not support speech recognition.');
      return;
    }
    if (!isMicrophoneAvailable) {
      console.warn('Microphone is not available.');
      return;
    }
    if (!listening && value) {
      SpeechRecognition.startListening({ continuous: true, language: 'zh-TW' })
        .then(() => {
          console.log('Speech recognition started.');
        })
        .catch(error => {
          console.error('Error starting speech recognition:', error);
        });
    } else {
      SpeechRecognition.stopListening();
    }
  }

  function onSubmit(event) {
    event.preventDefault();
    if (processing) {
      console.warn('Already processing, ignoring submission.');
      return;
    }
    // Validate before flipping the processing flags, so an empty submission
    // cannot leave the form locked.
    const input = event.target.elements.input.value;
    if (!input.trim()?.length) {
      console.warn('Input is empty, ignoring submission.');
      return;
    }
    setProcessing(true);
    setShowProcessing(true);

    const startTime = Date.now();
    console.log('Submit reply:', input);
    sendChatMessage([...history, { role: 'user', content: input }], params)
      .then(response => {
        if (!response.ok) {
          throw new Error('Network response was not ok');
        }
        const data = response;
        console.log('get reply:', data, new Date(Date.now() - startTime).toISOString().slice(11, 19));

        setPrompt(prev => [...prev, data.prompt]);

        if (!audioOutput) {
          // Text-only path: reveal the reply immediately.
          setHistory(prev => [...prev, { role: 'assistant', content: data.output_text }]);
          setProcessing(false);
          setShowProcessing(false);
        } else {
          // Audio path: reveal the reply once speech is ready, and hold the
          // processing flag until playback ends to block overlapping turns.
          console.log('create speech:', data.output_text);
          textToSpeech(data.output_text, params?.voice_prompt)
            .then(audioUrl => {
              const audio = new Audio(audioUrl);
              console.log('play audio...', new Date(Date.now() - startTime).toISOString().slice(11, 19));
              setShowProcessing(false);
              setHistory(prev => [...prev, { role: 'assistant', content: data.output_text }]);
              audio.play().catch(error => {
                console.error('Audio playback failed:', error);
              });
              audio.addEventListener('ended', () => {
                console.log('Audio playback ended');
                setProcessing(false);
              });
            })
            .catch(error => {
              console.error('TTS error:', error);
              setProcessing(false);
              setShowProcessing(false);
            });
        }
      })
      .catch(error => {
        // Reset the flags on failure so the form does not stay locked.
        console.error('Reply failed:', error);
        setProcessing(false);
        setShowProcessing(false);
      });

    // Optimistically append the user's message and clear the input.
    event.target.elements.input.value = '';
    setHistory(prev => [...prev, { role: 'user', content: input }]);
  }

  // Keep the history scrolled to the bottom and type-on the newest assistant reply.
  useEffect(() => {
    refHistoryContainer.current.scrollTop = refHistoryContainer.current.scrollHeight;

    if (history.length === 0) return;
    const lastItem = document.querySelector('.last_history');
    if (!lastItem) return;
    if (lastItem.classList.contains('user')) return;

    // Split the reply into characters and fade them in one at a time.
    const split = SplitText.create(lastItem, { type: 'chars', aria: 'hidden' });
    gsap.fromTo(
      split.chars,
      { opacity: 0 },
      { opacity: 1, y: 0, duration: 0.5, ease: 'steps(1)', stagger: 0.1 },
    );
  }, [history]);

  // Keep the prompt log scrolled to the newest entry.
  useEffect(() => {
    refPromptContainer.current.scrollTop = refPromptContainer.current.scrollHeight;
  }, [prompt]);

  // Mirror the interim transcript into the input while dictating.
  useEffect(() => {
    if (listening) {
      refInput.current.value = transcript;
    }
  }, [transcript]);

  // Auto-submit once speech recognition settles on a final transcript.
  useEffect(() => {
    if (finalTranscript) {
      refInput.current.value = finalTranscript;
      console.log('Final Transcript:', finalTranscript);
      if (processing) return; // Prevent submission while a reply is in flight.
      onSubmit({
        preventDefault: () => {},
        target: { elements: { input: refInput.current } },
      });
      resetTranscript(); // Clear the transcript after submission.
    }
  }, [finalTranscript]);

  useEffect(() => {
    console.log('window.SpeechRecognition=', window.SpeechRecognition || window.webkitSpeechRecognition);
  }, []);
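  /*
   * Reply contract assumed above (inferred from usage only; the real
   * definitions live in ../util/chat and ../util/tts):
   *   sendChatMessage(history, params) resolves with
   *     { ok: boolean, output_text: string, prompt: string }
   *   textToSpeech(text, voicePrompt) resolves with a URL playable
   *     via `new Audio(url)`.
   */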
  return (
    <div className="conversation">
      {/* The markup below is a minimal reconstruction: only .last_history and
          the role class on history items are required by the animation effect
          above; the other class names and labels are placeholders. */}
      <div className="prompt-log" ref={refPromptContainer}>
        {prompt?.length === 0 ? (
          <p className="placeholder">Prompt will appear here...</p>
        ) : (
          prompt?.map((item, index) => (
            <div className="prompt-item" key={index}>
              {item}
            </div>
          ))
        )}
      </div>

      <div className="history" ref={refHistoryContainer}>
        {history?.length === 0 && !showProcessing ? (
          <p className="placeholder">History will appear here...</p>
        ) : (
          history.map((item, index) => (
            <div
              key={index}
              className={`${item.role}${index === history.length - 1 ? ' last_history' : ''}`}
            >
              {item.content}
            </div>
          ))
        )}
        {showProcessing && <div className="processing">...</div>}
      </div>

      <label>
        Voice input
        <input type="checkbox" onChange={e => toggleAudio(e.target.checked)} />
      </label>
      <label>
        Voice output
        <input type="checkbox" checked={audioOutput} onChange={e => setAudioOutput(e.target.checked)} />
      </label>
      <button type="button" onClick={restart}>Restart</button>

      {/* Input is assumed to forward its ref to the underlying <input>. */}
      <form onSubmit={onSubmit}>
        <Input name="input" ref={refInput} />
        <button type="submit" disabled={processing}>Send</button>
      </form>
    </div>
  );
}
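
/*
 * Usage sketch (assumption — the host page and router are not shown here):
 *   <Conversation />
 * useData() is expected to supply `params` (e.g. { voice_prompt, ... }) for
 * the chat and TTS calls; the component manages its own history, prompt log,
 * and audio state internally.
 */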