diff --git a/src/app/api/chat/route.ts b/src/app/api/chat/route.ts
deleted file mode 100644
index 9a8e516..0000000
--- a/src/app/api/chat/route.ts
+++ /dev/null
@@ -1,63 +0,0 @@
-import { NextRequest, NextResponse } from 'next/server'
-import OpenAI from "openai"
-import { CHAT_MODELS } from '../../../services/chatService'
-
-if (!process.env.X_AI_API_KEY) {
-  throw new Error('X_AI_API_KEY is not configured in environment variables')
-}
-
-const openai = new OpenAI({
-  apiKey: process.env.X_AI_API_KEY,
-  baseURL: "https://api.x.ai/v1",
-})
-
-export async function POST(req: NextRequest): Promise<NextResponse> {
-  try {
-    const { prompt, characterId } = await req.json()
-
-    if (!prompt || !characterId) {
-      return NextResponse.json(
-        { error: 'Prompt and characterId are required' },
-        { status: 400 }
-      )
-    }
-
-    // Find the model config using characterId
-    const modelConfig = CHAT_MODELS.find(m => m.characterId === characterId)
-    if (!modelConfig) {
-      return NextResponse.json(
-        { error: 'Invalid character ID' },
-        { status: 400 }
-      )
-    }
-
-    console.log('Generating chat response with character:', modelConfig.name)
-    console.log('System prompt:', modelConfig.systemPrompt)
-    console.log('User prompt:', prompt)
-
-    const completion = await openai.chat.completions.create({
-      model: modelConfig.modelId,
-      messages: [
-        { role: "system", content: modelConfig.systemPrompt },
-        { role: "user", content: prompt }
-      ],
-    })
-
-    const response = completion.choices[0].message.content
-
-    if (!response) {
-      console.error('No response in completion:', completion)
-      throw new Error('No response generated')
-    }
-
-    return NextResponse.json({ response })
-  } catch (error) {
-    console.error('Chat generation error:', error)
-    return NextResponse.json(
-      { error: error instanceof Error ? error.message : 'Failed to generate response' },
-      { status: 500 }
-    )
-  }
-}
-
-export const dynamic = 'force-dynamic'
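
For context, the deleted handler read "prompt" and "characterId" from the JSON request body and answered with { response } on success or { error } on failure. Below is a minimal client-side sketch of how such a route could have been called; it assumes the endpoint was served at /api/chat (which follows from the file path) and uses a made-up characterId, since the real IDs live in CHAT_MODELS and are not shown in this diff.

// Illustrative caller of the removed POST /api/chat endpoint (not part of the repo).
async function askCharacter(prompt: string, characterId: string): Promise<string> {
  const res = await fetch('/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ prompt, characterId }),
  })
  const data = await res.json()
  if (!res.ok) {
    // The route returned { error } with status 400 or 500 on failure.
    throw new Error(data.error ?? 'Failed to generate response')
  }
  // The route returned { response } with the completion text on success.
  return data.response
}

// Usage (illustrative; 'some-character-id' is a placeholder):
// askCharacter('Hello there', 'some-character-id').then(console.log)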