From af601f4bbf5c476627bf8cc87f423d93c0529003 Mon Sep 17 00:00:00 2001
From: zramsay
Date: Thu, 2 Jan 2025 15:48:38 -0500
Subject: [PATCH] sleek

---
 package-lock.json                |  1 +
 src/app/api/chat/route.ts        | 63 ++++++++++++++++++++++++++
 src/app/api/flux/route.ts        | 67 ---------------------------
 src/app/page.tsx                 | 45 +++++++++---------
 src/components/AIServiceCard.tsx | 29 +++++-------
 src/services/chatService.ts      | 78 ++++++++++++++++++++++++++++++++
 src/services/fluxService.ts      | 70 ----------------------------
 7 files changed, 176 insertions(+), 177 deletions(-)
 create mode 100644 src/app/api/chat/route.ts
 delete mode 100644 src/app/api/flux/route.ts
 create mode 100644 src/services/chatService.ts
 delete mode 100644 src/services/fluxService.ts

diff --git a/package-lock.json b/package-lock.json
index a55aae4..2dde6cf 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -4324,6 +4324,7 @@
       "version": "4.77.0",
       "resolved": "https://registry.npmjs.org/openai/-/openai-4.77.0.tgz",
       "integrity": "sha512-WWacavtns/7pCUkOWvQIjyOfcdr9X+9n9Vvb0zFeKVDAqwCMDHB+iSr24SVaBAhplvSG6JrRXFpcNM9gWhOGIw==",
+      "license": "Apache-2.0",
       "dependencies": {
         "@types/node": "^18.11.18",
         "@types/node-fetch": "^2.6.4",
diff --git a/src/app/api/chat/route.ts b/src/app/api/chat/route.ts
new file mode 100644
index 0000000..9a8e516
--- /dev/null
+++ b/src/app/api/chat/route.ts
@@ -0,0 +1,63 @@
+import { NextRequest, NextResponse } from 'next/server'
+import OpenAI from "openai"
+import { CHAT_MODELS } from '../../../services/chatService'
+
+if (!process.env.X_AI_API_KEY) {
+  throw new Error('X_AI_API_KEY is not configured in environment variables')
+}
+
+const openai = new OpenAI({
+  apiKey: process.env.X_AI_API_KEY,
+  baseURL: "https://api.x.ai/v1",
+})
+
+export async function POST(req: NextRequest): Promise<NextResponse> {
+  try {
+    const { prompt, characterId } = await req.json()
+
+    if (!prompt || !characterId) {
+      return NextResponse.json(
+        { error: 'Prompt and characterId are required' },
+        { status: 400 }
+      )
+    }
+
+    // Find the model config using characterId
+    const modelConfig = CHAT_MODELS.find(m => m.characterId === characterId)
+    if (!modelConfig) {
+      return NextResponse.json(
+        { error: 'Invalid character ID' },
+        { status: 400 }
+      )
+    }
+
+    console.log('Generating chat response with character:', modelConfig.name)
+    console.log('System prompt:', modelConfig.systemPrompt)
+    console.log('User prompt:', prompt)
+
+    const completion = await openai.chat.completions.create({
+      model: modelConfig.modelId,
+      messages: [
+        { role: "system", content: modelConfig.systemPrompt },
+        { role: "user", content: prompt }
+      ],
+    })
+
+    const response = completion.choices[0].message.content
+
+    if (!response) {
+      console.error('No response in completion:', completion)
+      throw new Error('No response generated')
+    }
+
+    return NextResponse.json({ response })
+  } catch (error) {
+    console.error('Chat generation error:', error)
+    return NextResponse.json(
+      { error: error instanceof Error ? error.message : 'Failed to generate response' },
+      { status: 500 }
+    )
+  }
+}
+
+export const dynamic = 'force-dynamic'
diff --git a/src/app/api/flux/route.ts b/src/app/api/flux/route.ts
deleted file mode 100644
index 24a0469..0000000
--- a/src/app/api/flux/route.ts
+++ /dev/null
@@ -1,67 +0,0 @@
-import { NextRequest, NextResponse } from 'next/server'
-import { fal } from "@fal-ai/client"
-
-if (!process.env.FAL_AI_KEY) {
-  throw new Error('FAL_AI_KEY is not configured in environment variables')
-}
-
-// Configure fal client
-fal.config({
-  credentials: process.env.FAL_AI_KEY
-})
-
-// Consistent image size for all generations
-const IMAGE_WIDTH: number = 1024
-const IMAGE_HEIGHT: number = 1024
-
-export async function POST(req: NextRequest): Promise<NextResponse> {
-  try {
-    const { prompt, modelId } = await req.json()
-
-    if (!prompt || !modelId) {
-      return NextResponse.json(
-        { error: 'Prompt and modelId are required' },
-        { status: 400 }
-      )
-    }
-
-    console.log('Generating with Flux model:', modelId)
-    console.log('Prompt:', prompt)
-
-    const result = await fal.subscribe(modelId, {
-      input: {
-        prompt: prompt,
-        image_size: {
-          width: IMAGE_WIDTH,
-          height: IMAGE_HEIGHT
-        },
-      },
-      logs: true,
-      onQueueUpdate: (update) => {
-        if (update.status === "IN_PROGRESS") {
-          console.log('Generation progress:', update.logs.map((log) => log.message))
-        }
-      },
-    })
-
-    console.log('Flux generation result:', result)
-
-    // Extract the image URL from the response
-    const imageUrl = result.data?.images?.[0]?.url
-
-    if (!imageUrl) {
-      console.error('No image URL in response:', result)
-      throw new Error('No image URL in response')
-    }
-
-    return NextResponse.json({ imageUrl })
-  } catch (error) {
-    console.error('Flux generation error:', error)
-    return NextResponse.json(
-      { error: error instanceof Error ? error.message : 'Failed to generate image' },
-      { status: 500 }
-    )
-  }
-}
-
-export const dynamic = 'force-dynamic'
diff --git a/src/app/page.tsx b/src/app/page.tsx
index ae2eb2c..c4b125d 100644
--- a/src/app/page.tsx
+++ b/src/app/page.tsx
@@ -3,7 +3,7 @@
 import React, { useState } from 'react'
 import WalletHeader from '../components/WalletHeader'
 import AIServiceCard from '../components/AIServiceCard'
-import { generateWithFlux, FluxGenerationResult, FLUX_MODELS } from '../services/fluxService'
+import { generateChatResponse, ChatGenerationResult, CHAT_MODELS } from '../services/chatService'
 import { processMTMPayment } from '../services/paymentService'
 
 interface WalletState {
@@ -13,7 +13,7 @@
 
 declare global {
   interface Window {
-    solflare: any; // Or use a more specific type if available
+    solflare: any;
   }
 }
 
@@ -49,8 +49,8 @@ const Page: React.FC = (): React.ReactElement => {
     }
   }
 
-  const handleFluxGeneration = (modelId: string, cost: number) => {
-    return async (prompt: string): Promise<FluxGenerationResult> => {
+  const handleChatGeneration = (modelId: string, cost: number) => {
+    return async (prompt: string): Promise<ChatGenerationResult> => {
       if (!walletState.connected || !walletState.publicKey || !window.solflare) {
         return { error: 'Wallet not connected' }
       }
@@ -66,23 +66,23 @@ const Page: React.FC = (): React.ReactElement => {
         return { error: paymentResult.error }
      }
 
-      // Then generate image with specified model
-      return generateWithFlux(prompt, modelId)
+      // Then generate chat response with specified model
+      return generateChatResponse(prompt, modelId)
     }
   }
 
-  return (
+  return (
         {/* Header */}
-            Mark's Meme Market
+            Mark's Mama Market
 
-            Use MTM to generate memes
+            Ask each of Mark's grandmas for advice
 
-        {/* Flux Models Grid */}
-          {FLUX_MODELS.map((model) => (
+        {/* Chat Models Grid */}
+          {CHAT_MODELS.map((model) => (
           ))}
 
-          {/* Coming Soon Card - with purple theme for contrast */}
+          {/* Coming Soon Card */}
-            Coming Soon
+            More Grandmas Soon
-            New AI model integration in development. Stay tuned for more amazing features!
+            New grandmothers in development, stay tuned!
             TBD
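
The hunks for src/components/AIServiceCard.tsx, src/services/chatService.ts, and src/services/fluxService.ts are not shown in this excerpt. For orientation only, here is a minimal sketch of how a client-side generateChatResponse helper could call the new /api/chat route. The { prompt, characterId } request body and the { response } / { error } reply shape are taken from the route handler above, and the generateChatResponse / ChatGenerationResult names come from the page.tsx import; everything else is an assumption, not the chatService.ts this commit actually adds (that file also exports CHAT_MODELS, whose contents are not visible here).

// Illustrative sketch only, not the src/services/chatService.ts added by this commit.
// It mirrors the request/response contract of src/app/api/chat/route.ts above.
export interface ChatGenerationResult {
  response?: string
  error?: string
}

export async function generateChatResponse(
  prompt: string,
  characterId: string
): Promise<ChatGenerationResult> {
  try {
    const res = await fetch('/api/chat', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ prompt, characterId }),
    })

    const data = await res.json()

    // The route returns { error } with a non-2xx status on failure.
    if (!res.ok) {
      return { error: data.error ?? 'Failed to generate response' }
    }

    // On success the route returns { response } containing the model's reply.
    return { response: data.response }
  } catch (err) {
    return { error: err instanceof Error ? err.message : 'Failed to generate response' }
  }
}

Keeping the fetch in a service module matches the old fluxService.ts layout, so page.tsx only passes a prompt plus a character/model id and renders whatever comes back.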