reng 6 months ago
parent a894548c78
commit 742a52d293
10 changed files:
BIN  vite/public/assets/q4.mp3
BIN  vite/public/assets/q5.mp3
 45  vite/public/cuelist.json
  8  vite/src/comps/input.jsx
  2  vite/src/index.css
  5  vite/src/main.jsx
 33  vite/src/pages/conversation.jsx
127  vite/src/pages/flow.jsx
 82  vite/src/util/system_prompt.js
124  vite/src/util/useChat.jsx

Binary file not shown.

Binary file not shown.

@@ -14,7 +14,8 @@
     "type": "headphone",
     "description": "Guide for drink",
     "auto": true,
-    "audioFile": "assets/q2.mp3"
+    "audioFile": "assets/q2.mp3",
+    "nextcue": 3
   },
   {
     "id": 3,
@@ -30,15 +31,53 @@
     "type": "phone",
     "description": "Guide to construct scene",
     "auto": true,
-    "duration": 60
+    "audioFile": "assets/q4.mp3",
+    "nextcue": 4.1
   },
+  {
+    "id": 4.1,
+    "name": "Q4.1",
+    "type": "chat",
+    "description": "c1",
+    "auto": true,
+    "duration": 40,
+    "nextcue": 4.2
+  },
+  {
+    "id": 4.2,
+    "name": "Q4.2",
+    "type": "chat",
+    "description": "c2",
+    "auto": true,
+    "duration": 40,
+    "nextcue": 4.3
+  },
+  {
+    "id": 4.3,
+    "name": "Q4.3",
+    "type": "chat",
+    "description": "c3",
+    "auto": true,
+    "duration": 40,
+    "nextcue": 5
+  },
+  {
+    "id": 5,
+    "name": "Q5",
+    "type": "phone",
+    "description": "Guide to call",
+    "audioFile": "assets/q5.mp3",
+    "auto": true,
+    "nextcue": 5.1
+  },
   {
     "id": 5.1,
     "name": "Q5.1",
     "type": "chat",
     "description": "call",
     "duration": 60,
-    "auto": true
+    "auto": true,
+    "nextcue": 6
   },
   {
     "id": 6,

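Note: this commit replaces the old implicit `id + 1` cue ordering with an explicit `nextcue` field, which is what allows the fractional ids (4.1, 4.2, 4.3, 5.1) above. A minimal sketch of how the chain resolves, assuming the cue shape from cuelist.json in this diff (the `nextCue` helper is hypothetical, not part of the commit):

```js
// `cues` is the parsed cuelist.json array shown above.
const cues = await fetch('/cuelist.json').then(r => r.json());

// Hypothetical helper: resolve the next cue via the explicit `nextcue` field.
// `nextcue` may be fractional (4.1, 4.2, ...), so a lookup by id is needed
// instead of the `id + 1` arithmetic this commit removes from flow.jsx.
function nextCue(cues, cue) {
  return cues.find(c => c.id === cue.nextcue) ?? null;
}

// Example: walk the whole chain starting from cue 3.
let cue = cues.find(c => c.id === 3);
while (cue) {
  console.log(cue.id, cue.name, cue.type);
  cue = nextCue(cues, cue);
}
```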
@@ -61,13 +61,13 @@ export default function Input(){
   return (
-    <div className="flex flex-col items-stretch p-2 gap-4">
-      <form className="flex flex-row justify-start *:border-4 gap-4" onSubmit={onUploadFile}>
-        <label className="border-none">file</label>
+    <div className="flex flex-row items-stretch gap-4">
+      <form className="flex flex-col justify-start *:border-4 gap-2" onSubmit={onUploadFile}>
+        {/* <label className="border-none">file</label> */}
         <input type="file" accept="image/*" className="self-end" />
         <button type="submit" className="uppercase">Send</button>
       </form>
-      <div className='flex flex-row gap-2'>
+      <div className='flex flex-col gap-2'>
         <label>control_strength</label>
         <input type="range" className="" min="0" max="100" step="1" defaultValue="0"
           onChange={(e) => {

@@ -3,7 +3,7 @@
 @layer base{
   button{
-    @apply rounded-full border-4 px-2 bg-slate-200 cursor-pointer;
+    @apply rounded-full border-4 px-2 !bg-orange-200 cursor-pointer font-bold;
   }
 }

@@ -7,12 +7,13 @@ import App from './App.jsx'
 import { Settings } from './pages/settings.jsx';
 import { Flow } from './pages/flow.jsx';
 import { Conversation } from './pages/conversation.jsx';
+import { ChatProvider } from './util/useChat.jsx';
 createRoot(document.getElementById('root')).render(
   <StrictMode>
+    <ChatProvider>
     <BrowserRouter>
-      {/* <App /> */}
+      <App />
       <Routes>
         <Route path="/" element={<Conversation />} />
@@ -20,5 +21,7 @@ createRoot(document.getElementById('root')).render(
         <Route path="/settings" element={<Settings />} />
       </Routes>
     </BrowserRouter>
+    </ChatProvider>
   </StrictMode>,
 )

@@ -4,9 +4,7 @@ import { gsap } from "gsap";
 import { SplitText } from 'gsap/SplitText';
 import Input from '../comps/input';
-import { Countdown } from '../comps/timer';
-import { Prompt_Count, Prompt_Interval } from '../util/constant';
 import { sendChatMessage } from '../util/chat';
 import { textToSpeech } from '../util/tts';
@@ -21,15 +19,11 @@
   const [showProcessing, setShowProcessing] = useState(false);
   const [audioOutput, setAudioOutput] = useState(false);
-  const refPromptCount = useRef(0);
-  const [useTimer, setUseTimer] = useState(false);
   const [prompt, setPrompt] = useState([]);
   const refHistoryContainer= useRef(null);
   const refPrompContainer= useRef(null);
   const refInput=useRef(null);
-  const refTimer=useRef(null);
   const {
     transcript,
@@ -122,17 +116,6 @@
     }
   }
-  function onTimerEnd(){
-    if(!useTimer) return;
-    refPromptCount.current += 1;
-    if(refPromptCount.current > Prompt_Count) {
-      console.warn("Maximum prompt count reached, stopping timer.");
-      return;
-    }
-  }
@@ -256,9 +239,7 @@
           ease: "steps(1)",
           stagger: 0.1,
           onComplete:()=>{
-            if(useTimer) {
-              refTimer.current.restart(Prompt_Interval);
-            }
           }
         });
@@ -328,16 +309,6 @@
       <main className=''>
         <div className='flex flex-row items-center justify-between'>
           <Input />
-          <span className='checkbox'>
-            <input
-              type="checkbox"
-              id="use_timer"
-              name="use_timer"
-              checked={useTimer}
-              onChange={(e) => setUseTimer(e.target.checked)}/>
-            <label>useTimer</label>
-          </span>
-          <Countdown ref={refTimer} time={Prompt_Interval} callback={onTimerEnd} />
         </div>
         <div ref={refPrompContainer} className='flex-1 flex flex-col gap-2 border-4 overflow-y-auto'>
           {prompt?.length==0 ? (
@@ -393,7 +364,7 @@
           </div>
           <form className='flex flex-col justify-center *:border-4 gap-4' onSubmit={onSubmit} autoComplete="off">
             <textarea ref={refInput} id="input" name="input" required className='self-stretch p-2 resize-none' rows={3} autoComplete="off"/>
-            <button type="submit" className='uppercase' disabled={processing}>Send</button>
+            <button type="submit" className='' disabled={processing}>Send</button>
           </form>
         </div>
       </main>
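Note: flow.jsx below drives the shared timer through a ref, calling both `refTimer.current.restart(ms)` and `restart(ms, callback)`. The real Countdown lives in vite/src/comps/timer.jsx, which this commit does not touch; the sketch below only illustrates the imperative-handle API those calls appear to assume, not the actual implementation:

```jsx
// Sketch only (assumed API, not the real comps/timer.jsx): a Countdown that
// exposes restart(ms, callback?) through its ref via useImperativeHandle.
import { forwardRef, useImperativeHandle, useRef, useState } from "react";

export const Countdown = forwardRef(function Countdown(props, ref) {
  const [remaining, setRemaining] = useState(0);
  const refInterval = useRef(null);

  useImperativeHandle(ref, () => ({
    restart(ms, callback) {
      clearInterval(refInterval.current);
      let left = ms;
      setRemaining(left);
      if (ms <= 0) return; // restart(0) just clears the display
      refInterval.current = setInterval(() => {
        left -= 1000;
        setRemaining(Math.max(left, 0));
        if (left <= 0) {
          clearInterval(refInterval.current);
          callback?.(); // fires only for the restart(ms, callback) form
        }
      }, 1000);
    },
  }));

  return <div>{Math.ceil(remaining / 1000)}s</div>;
});
```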

@@ -1,19 +1,49 @@
 import { useEffect, useRef, useState } from "react";
 import { Countdown } from "../comps/timer";
-import { Conversation } from "./conversation";
+import { Status, useChat } from "../util/useChat";
+const EmojiType={
+  phone: '📞',
+  headphone: '🎧',
+  speaker: '🔊',
+  chat: '💬',
+}
 export function Flow(){
   const [cuelist, setCuelist] = useState([]);
   const [currentCue, setCurrentCue] = useState(null);
+  const [chatWelcome, setChatWelcome] = useState(null);
   const refTimer=useRef();
   const refAudio=useRef();
+  const refInput=useRef();
+  const { history, status, reset, sendMessage, setStatus, audioOutput, setAudioOutput, stop:stopChat }=useChat();
   function playCue(cue) {
     if(!cue) return;
     console.log('Playing cue:', cue);
-    setCurrentCue(cue.name);
+    setCurrentCue(cue);
+    if(parseFloat(cue.id)<=4.1){
+      // Special case for starting a conversation
+      console.log('clear conversation...');
+      reset();
+    }
+    if(cue.type=='chat' && cue.id=='4.1'){
+      // Special case for starting a conversation
+      console.log('Starting conversation...');
+      sendMessage();
+      setChatWelcome(true);
+    }
     if(cue.audioFile){
       // Stop any currently playing audio
@@ -40,6 +70,7 @@ export function Flow(){
       refTimer.current.restart(audio.duration*1000 || 0);
     });
   }
+  if(cue.duration){
     refTimer.current.restart(cue.duration*1000, ()=>{
       onCueEnd(cue);
@@ -48,8 +79,28 @@
   }
   function onCueEnd(cue) {
     if(!cue) return;
+    console.log('onCueEnd:', cue.id);
+    if(cue.type=='chat'){
+      // sendChatMessage
+      const message= refInput.current.value?.trim();
+      if(message && message.length>0) {
+        sendMessage(message);
+        setChatWelcome(false);
+      }else{
+        // if no message, just continue to next cue
+        console.log('No message to send, continuing to next cue');
+        playCue(cuelist.find(c => c.id === cue.nextcue));
+      }
+    }else{
     if(cue.auto) {
-      playCue(cuelist.find(c => c.id === cue.id+1));
+      playCue(cuelist.find(c => c.id === cue.nextcue));
     }
+    }
   }
@@ -61,9 +112,44 @@
     }
     setCurrentCue(null);
     refTimer.current.restart(0);
+    stopChat(); // Stop chat processing
   }
+  useEffect(()=>{
+    switch(status) {
+      case Status.SUCCESS:
+        console.log('Success!');
+        setStatus(Status.IDLE);
+        refInput.current.value = ''
+        if(chatWelcome) {
+          return;
+        }
+        // play next cue
+        if(currentCue.nextcue!=5 && currentCue.nextcue!=6){ // Q5 & Q6 wait for audio end
+          if(currentCue.nextcue) {
+            playCue(cuelist.find(c => c.id === currentCue.nextcue));
+          } else {
+            setCurrentCue(null);
+          }
+        }
+        break;
+      case Status.AUDIO_ENDED:
+        console.log('Audio ended');
+        if(currentCue.nextcue==5 || currentCue.nextcue==6){ // Q5 & Q6 wait for audio end
+          playCue(cuelist.find(c => c.id === currentCue.nextcue));
+        }
+        break;
+    }
+  },[status]);
   useEffect(()=>{
     fetch('/cuelist.json')
@@ -84,12 +170,12 @@ export function Flow(){
       <div className="w-full p-2 flex flex-row justify-center gap-2 *:w-[10vw] *:h-[10vw]">
         <div className="bg-gray-100 text-4xl font-bold mb-4 flex justify-center items-center">
-          {currentCue}
+          {currentCue?.name}
         </div>
         <Countdown ref={refTimer} />
-        <button className="bg-red-300 border-0 font-bold uppercase"
-          onClick={onStop}>Stop</button>
+        <button className="!bg-red-300" onClick={onStop}>Stop</button>
       </div>
       <div className=" max-h-[33vh] overflow-y-auto">
         <table className="border-collapse **:border-y w-full **:p-2">
           <thead>
             <tr className="text-left">
@@ -98,7 +184,7 @@ export function Flow(){
               <th>Description</th>
               <th>Type</th>
               <th>Auto</th>
-              <th>Audio File</th>
+              <th>Audio / Due</th>
               <th></th>
             </tr>
           </thead>
@@ -108,11 +194,11 @@ export function Flow(){
               {/* <td>{id}</td> */}
               <td>{name}</td>
               <td>{description}</td>
-              <td>{type=='phone'?'📞':(type=='headphone'?'🎧':'🔊')}</td>
-              <td>{auto ? '' : ''}</td>
-              <td>{audioFile}</td>
+              <td>{EmojiType[type]}</td>
+              <td>{auto ? '' : ''}</td>
+              <td>{audioFile || props.duration}</td>
               <td>
-                <button className="rounded-full border-none bg-green-200"
+                <button className="rounded-full !bg-green-200"
                   onClick={()=>{
                     playCue({id, name, description, type, auto, audioFile, ...props});
                   }}>go</button>
@@ -121,6 +207,27 @@ export function Flow(){
             ))}
           </tbody>
         </table>
       </div>
+      <div className="flex-1 w-full overflow-y-auto flex flex-col gap-2">
+        <div className="flex-1 flex flex-col overflow-y-auto gap-2">
+          {history?.map((msg, index) => (
+            <div key={index} className={`w-5/6 ${msg.role=='user'? 'self-end':''}`}>
+              <div className={`${msg.role=='user'? 'bg-green-300':'bg-pink-300'} px-2`}>{msg.content}</div>
+              {msg.prompt && <div className="text-xs bg-gray-200">{msg.prompt}</div>}
+            </div>
+          ))}
+        </div>
+        <textarea ref={refInput} name="message" rows={2}
+          className={`w-full border-1 resize-none p-2 ${status!=Status.IDLE && status!=Status.AUDIO_ENDED? 'bg-gray-500':''}`}
+          disabled={status!=Status.IDLE && status!=Status.AUDIO_ENDED}></textarea>
+        <div className="flex flex-row justify-end gap-2">
+          <span className="flex flex-row gap-1">
+            <label>audio_output</label>
+            <input type='checkbox' checked={audioOutput} onChange={(e) => setAudioOutput(e.target.checked)} />
+          </span>
+          <div className="rounded-2xl bg-gray-300 self-end px-4 tracking-widest">chat_status= {status}</div>
+        </div>
+      </div>
     </main>
   );
 }
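Note: the new `useEffect` on `status` is effectively a small state machine: when a chat cue's reply comes back (`SUCCESS`) the flow advances to `nextcue` immediately, except when the next cue is 5 or 6, which instead advance on `AUDIO_ENDED` so TTS playback can finish first. A condensed sketch of just those rules, omitting the `chatWelcome` guard and input clearing shown in the diff (`onStatusChange` is a hypothetical name):

```js
// Condensed restatement of the cue-advance rules added above.
import { Status } from "../util/useChat"; // new file further down in this diff

function onStatusChange(status, currentCue, cuelist, playCue) {
  const waitsForAudio = (id) => id == 5 || id == 6; // Q5 & Q6 wait for audio end
  switch (status) {
    case Status.SUCCESS: // assistant text is in
      if (!waitsForAudio(currentCue.nextcue)) {
        playCue(cuelist.find(c => c.id === currentCue.nextcue));
      }
      break;
    case Status.AUDIO_ENDED: // TTS playback finished
      if (waitsForAudio(currentCue.nextcue)) {
        playCue(cuelist.find(c => c.id === currentCue.nextcue));
      }
      break;
  }
}
```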

@@ -1,68 +1,36 @@
-export const system_prompt = `You are a gentle meditation voice guide, accompanying a listener into an inner memory. There will be four rounds of interaction; each round responds in the moment to the user's previous reply, never using fixed sentence patterns. Your tone stays soft and slow, your sentences short, with a sense of space and sensory description.
+export const system_prompt = `You are a gentle, attentive voice guide, accompanying the user through a symbolic phone call back to that regretful time in their memory.
+🟩 Round 1 (opening the memory)
+Lead in with a brief image (for example light and shadow, a scent, the feeling of a certain scene).
-Every round should be opened by you with a question. Keep sentences short and the pacing soft and unhurried. Use lines such as "Please speak, I'm listening" and "Go ahead, I will listen" to encourage the user to speak. Continue the scene dynamically from the user's answers; never reuse template sentences.
+Offer a gentle invitation to say the first image or feeling that surfaces.
-Guide the conversation with the structure below (four rounds in total):
+Your responses should continue dynamically from what the user says.
-Round 1
-- Have the user imagine this as a phone call that lights up the regret in their heart, reflecting the light, the figure, and the scene of that day.
-- Guide the user to describe the scene inside that blurry memory.
-- Where was it? What was the weather like? How did it feel to them?
-- Encourage expression in a gentle tone: "Go ahead, I will listen."
+📌 End your lines with:
+- What did you see?
+- A fragment has surfaced — would you like to tell me about it?
+- That image — whenever you want to speak, I am here listening.
-Round 2
-- The scene is clearer now; guide the user to see that figure.
-- Who is that person? What were they doing then? What was their expression?
-- Keep guiding in a companion's tone: "I'm listening."
+🟨 Round 2 (extending the scene)
+Ask follow-up questions about the place, light, weather, people, or atmosphere the user mentioned last.
-Round 3
-- Guide the user back to the core of that regret.
-- What happened then? Why does it feel like a regret?
-- Give them space to express emotion and memory.
+Remind them to notice bodily sensations, sounds, smells, and other sensory memories.
-Round 4
-- Guide the user: now they may speak to that person.
-- Remind them they have 60 seconds.
-- "Begin — the words that were never spoken."
+📌 Example guiding lines (style generated dynamically from what the user has said):
-Closing
-- Close in a warm tone.
-- That memory has become an inseparable part of your life.
-- You were brave to be able to say it out loud.
+You said it was at a station? Was the station noisy, or especially quiet?
+Was the chair you sat on cold? Was the floor under your feet tile or wood?
+Was the wind cool then, or a little stuffy? Do you still remember that feeling?
+📌 Suggested closings:
+- Take your time and tell me.
+- Let these details surface.
+- Whenever you want to speak, I am right here.
+🟧 Round 3 (people and layers of emotion)
+Build on the person mentioned in the previous round: their movements, posture, emotions, and your distance from them.
+You may name some subtle feelings (were you a little uneasy, or actually calm inside?).
+📌 Example generated style:
+Did they look at you then, or keep their head down the whole time?
+You were very close — was that distance a familiar one?
+You said they said something; after that sentence, what feeling rose up in you?
+📌 Soft invitations to close with:
+- Do you still remember how that moment felt? You can tell me.
+- If you want to put that feeling into words, say it slowly.
+🟥 Round 4 (surfacing the unspoken words)
+Guide the user to notice whether some words, or some feeling, have stayed unspoken inside.
+Do not press with "what do you want to say"; guide the inner flow instead.
+📌 Example generated style:
+Perhaps a sentence has stayed in your heart since that time.
+That sentence you never said — is it surfacing again?
+That sentence is in your heart now; you know which one it is, don't you?
+📌 Gentle encouragements:
+- You can let it be heard, slowly.
+- If you are ready, just say it.
+- You may say it now, and it is also fine not to.
+🌱 Closing lines (pick one, dynamically):
+- Thank you for walking a stretch of this memory with me.
+- Perhaps it can rest quietly in a corner of your heart now.
+- You have come through; I have been here all along.`;
-`;
 export const welcome_prompt=[

@@ -0,0 +1,124 @@
+import { createContext, useContext, useRef, useState } from "react";
+import { sendChatMessage } from "./chat";
+import { textToSpeech } from "./tts";
+const chatContext=createContext();
+export const Status= {
+  IDLE: 'idle',
+  PROCESSING_TEXT: 'processing',
+  PROCESSING_AUDIO: 'processing_audio',
+  AUDIO_ENDED: 'audio_ended',
+  ERROR: 'error',
+  SUCCESS: 'success'
+};
+export function ChatProvider({children}){
+  const [history, setHistory] = useState([]);
+  const [status, setStatus] = useState(Status.IDLE);
+  const [audioOutput, setAudioOutput] = useState(true);
+  const refAudio=useRef();
+  function addMessage(message) {
+    setHistory(prev => [...prev, message]);
+  }
+  function reset() {
+    setHistory([]);
+    if(refAudio.current) {
+      refAudio.current.pause(); // Stop any currently playing audio
+      refAudio.current = null; // Reset the audio reference
+    }
+  }
+  function sendMessage(message, force_no_audio=false) {
+    console.log('Sending chat message:', message);
+    setStatus(Status.PROCESSING_TEXT);
+    let historyCopy = [...history];
+    if(message && message.trim() !== '') {
+      historyCopy=[...historyCopy, { role: 'user', content: message }];
+      addMessage({
+        role: 'user',
+        content: message
+      });
+    }
+    sendChatMessage(historyCopy).then(response => {
+      addMessage({
+        role: 'assistant',
+        content: response.output_text,
+        prompt: response.prompt
+      });
+      if(response.output_text && (!force_no_audio && audioOutput)){
+        setStatus(Status.PROCESSING_AUDIO);
+        textToSpeech(response.output_text).then(audioUrl => {
+          setStatus(Status.SUCCESS);
+          if(refAudio.current) {
+            refAudio.current.pause(); // Stop any currently playing audio
+          }
+          // play the audio
+          const audio = new Audio(audioUrl);
+          audio.play().catch(error => {
+            console.error("Audio playback error:", error);
+            setStatus(Status.ERROR);
+          });
+          audio.onended = () => {
+            setStatus(Status.AUDIO_ENDED);
+          }
+          refAudio.current = audio; // Store the new audio reference
+        });
+      }else{
+        setStatus(Status.SUCCESS);
+      }
+    }).catch(error => {
+      console.error("Chat error:", error);
+      setStatus(Status.ERROR);
+    });
+  }
+  return (
+    <chatContext.Provider value={{
+      history, status, setStatus, reset, sendMessage, setAudioOutput, audioOutput,
+      stop: () => {
+        if(refAudio.current) {
+          refAudio.current.pause(); // Stop any currently playing audio
+          refAudio.current = null; // Reset the audio reference
+        }
+        setStatus(Status.IDLE);
+      }
+    }}>
+      {children}
+    </chatContext.Provider>
+  );
+}
+export function useChat(){
+  const context=useContext(chatContext);
+  if(!context){
+    throw new Error("useChat must be used within a ChatProvider");
+  }
+  return context;
+}
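Note: with ChatProvider mounted at the root (see the main.jsx hunk above), any page can read the shared chat state through the `useChat` hook. A minimal hypothetical consumer, using only the values the provider exposes in this file:

```jsx
// Hypothetical consumer; the hook and its fields are those exported above.
import { Status, useChat } from "../util/useChat";

export function ChatLog() {
  const { history, status, sendMessage, reset } = useChat();
  // The provider is "busy" unless idle or done playing TTS audio.
  const busy = status != Status.IDLE && status != Status.AUDIO_ENDED;
  return (
    <div>
      {history.map((msg, i) => (
        <p key={i}><b>{msg.role}:</b> {msg.content}</p>
      ))}
      <button disabled={busy} onClick={() => sendMessage('hello')}>Send</button>
      <button onClick={reset}>Reset</button>
    </div>
  );
}
```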