// ── Module setup: environment, structured-output schema, OpenAI + OSC clients ──

import OpenAI from "openai";
import express from "express";
import cors from "cors";
import { system_prompt } from "./system_prompt.js";
import { Client } from 'node-osc';
import { config } from "dotenv";

// Load environment variables from .env file (OPENAI_API_KEY, PORT).
config();

// JSON schema for the model's structured output: the reply text shown to the
// user, plus a separate image prompt that gets forwarded over OSC.
const Output = {
  "type": "object",
  "properties": {
    "output_text": {
      "type": "string",
      "description": "The final output text generated by the model, without image prompt",
    },
    "prompt": {
      "type": "string",
      "description": "The generated image prompt based on the user's input and the system's guidance.",
    },
  },
  "additionalProperties": false,
  "required": ["output_text", "prompt"],
};

const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

// OSC client targeting the local TouchDesigner (TD) instance.
const osc_client = new Client('127.0.0.1', 8787);

// One-off test message so OSC connectivity problems surface at startup.
osc_client.send('/test', 55555, (error) => {
  if (error) {
    console.error('Error sending OSC message:', error);
  } else {
    console.log('OSC message sent successfully');
  }
});
// ── HTTP API: endpoints that call the OpenAI Responses API and forward the
// generated image prompt to TouchDesigner over OSC ──

const app = express();
const port = process.env.PORT || 3000;

app.use(express.json());
app.use(cors());

/**
 * Build the Responses API request payload shared by both endpoints.
 * @param {Array<object>} input - Conversation items from the client's request body.
 * @param {boolean} [stream=false] - Whether to request a streaming response.
 * @returns {object} Request body for client.responses.create().
 */
function buildRequest(input, stream = false) {
  const request = {
    model: "gpt-4.1",
    input: [
      {
        role: "system",
        content: [
          {
            type: 'input_text',
            text: system_prompt,
          },
        ],
      },
      ...input,
    ],
    text: {
      format: {
        type: 'json_schema',
        name: "output_prompt",
        schema: Output,
      },
    },
  };
  if (stream) request.stream = true;
  return request;
}

/**
 * Forward the generated image prompt to TouchDesigner via OSC.
 * Delivery is best-effort: errors are logged, never thrown.
 * @param {string} prompt - Image prompt extracted from the model output.
 */
function sendPromptToTD(prompt) {
  osc_client.send('/prompt', prompt, (error) => {
    if (error) {
      console.error('Error sending OSC message:', error);
    } else {
      console.log('OSC message sent successfully');
    }
  });
}

// Streaming variant: consumes the event stream server-side and responds with
// the first completed output item.
app.post("/generate_stream", async (req, res) => {
  const { input } = req.body;
  try {
    const response = await client.responses.create(buildRequest(input, true));
    for await (const event of response) {
      console.log(event);
      if (event.type === 'response.output_item.done') {
        console.log("Generated response:", event.item.content);
        const json = JSON.parse(event.item.content[0].text);
        // send prompt to TD
        sendPromptToTD(json.prompt);
        // Guard: the stream may yield more than one completed output item,
        // but res.json() must only be called once per request.
        if (!res.headersSent) {
          res.json(json);
        }
      }
    }
  } catch (error) {
    console.error("Error generating response:", error);
    // A response may already have been sent mid-stream; avoid the
    // "Cannot set headers after they are sent" crash in that case.
    if (!res.headersSent) {
      res.status(500).json({ error: "Failed to generate response" });
    }
  }
});

// Non-streaming variant: waits for the complete response, then replies.
app.post("/generate", async (req, res) => {
  const { input } = req.body;
  try {
    const response = await client.responses.create(buildRequest(input));
    console.log("Generated response:", response.output_text);
    const json = JSON.parse(response.output_text);
    // send prompt to TD
    sendPromptToTD(json.prompt);
    res.json(json);
  } catch (error) {
    console.error("Error generating response:", error);
    res.status(500).json({ error: "Failed to generate response" });
  }
});

app.listen(port, () => {
  console.log(`Server is running on http://localhost:${port}`);
});