zramsay 2025-01-02 15:48:38 -05:00
parent 8ed56ab084
commit af601f4bbf
7 changed files with 176 additions and 177 deletions

package-lock.json (generated)

@@ -4324,6 +4324,7 @@
"version": "4.77.0",
"resolved": "https://registry.npmjs.org/openai/-/openai-4.77.0.tgz",
"integrity": "sha512-WWacavtns/7pCUkOWvQIjyOfcdr9X+9n9Vvb0zFeKVDAqwCMDHB+iSr24SVaBAhplvSG6JrRXFpcNM9gWhOGIw==",
"license": "Apache-2.0",
"dependencies": {
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",

src/app/api/chat/route.ts (new file)

@@ -0,0 +1,63 @@
import { NextRequest, NextResponse } from 'next/server'
import OpenAI from "openai"
import { CHAT_MODELS } from '../../../services/chatService'
if (!process.env.X_AI_API_KEY) {
throw new Error('X_AI_API_KEY is not configured in environment variables')
}
const openai = new OpenAI({
apiKey: process.env.X_AI_API_KEY,
baseURL: "https://api.x.ai/v1",
})
export async function POST(req: NextRequest): Promise<NextResponse> {
try {
const { prompt, characterId } = await req.json()
if (!prompt || !characterId) {
return NextResponse.json(
{ error: 'Prompt and characterId are required' },
{ status: 400 }
)
}
// Find the model config using characterId
const modelConfig = CHAT_MODELS.find(m => m.characterId === characterId)
if (!modelConfig) {
return NextResponse.json(
{ error: 'Invalid character ID' },
{ status: 400 }
)
}
console.log('Generating chat response with character:', modelConfig.name)
console.log('System prompt:', modelConfig.systemPrompt)
console.log('User prompt:', prompt)
const completion = await openai.chat.completions.create({
model: modelConfig.modelId,
messages: [
{ role: "system", content: modelConfig.systemPrompt },
{ role: "user", content: prompt }
],
})
const response = completion.choices[0].message.content
if (!response) {
console.error('No response in completion:', completion)
throw new Error('No response generated')
}
return NextResponse.json({ response })
} catch (error) {
console.error('Chat generation error:', error)
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Failed to generate response' },
{ status: 500 }
)
}
}
export const dynamic = 'force-dynamic'
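
Once the dev server is running, the new route can be smoke-tested directly. A minimal sketch, assuming the default Next.js port and the "doris" character defined in chatService.ts below:

// Hypothetical smoke test for POST /api/chat (any fetch-capable JS runtime).
const res = await fetch('http://localhost:3000/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ prompt: 'What should I cook tonight?', characterId: 'doris' }),
})
const data = await res.json()
console.log(data.response ?? data.error)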

src/app/api/flux/route.ts (deleted)

@@ -1,67 +0,0 @@
import { NextRequest, NextResponse } from 'next/server'
import { fal } from "@fal-ai/client"
if (!process.env.FAL_AI_KEY) {
throw new Error('FAL_AI_KEY is not configured in environment variables')
}
// Configure fal client
fal.config({
credentials: process.env.FAL_AI_KEY
})
// Consistent image size for all generations
const IMAGE_WIDTH: number = 1024
const IMAGE_HEIGHT: number = 1024
export async function POST(req: NextRequest): Promise<NextResponse> {
try {
const { prompt, modelId } = await req.json()
if (!prompt || !modelId) {
return NextResponse.json(
{ error: 'Prompt and modelId are required' },
{ status: 400 }
)
}
console.log('Generating with Flux model:', modelId)
console.log('Prompt:', prompt)
const result = await fal.subscribe(modelId, {
input: {
prompt: prompt,
image_size: {
width: IMAGE_WIDTH,
height: IMAGE_HEIGHT
},
},
logs: true,
onQueueUpdate: (update) => {
if (update.status === "IN_PROGRESS") {
console.log('Generation progress:', update.logs.map((log) => log.message))
}
},
})
console.log('Flux generation result:', result)
// Extract the image URL from the response
const imageUrl = result.data?.images?.[0]?.url
if (!imageUrl) {
console.error('No image URL in response:', result)
throw new Error('No image URL in response')
}
return NextResponse.json({ imageUrl })
} catch (error) {
console.error('Flux generation error:', error)
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Failed to generate image' },
{ status: 500 }
)
}
}
export const dynamic = 'force-dynamic'

src/app/page.tsx

@@ -3,7 +3,7 @@
import React, { useState } from 'react'
import WalletHeader from '../components/WalletHeader'
import AIServiceCard from '../components/AIServiceCard'
-import { generateWithFlux, FluxGenerationResult, FLUX_MODELS } from '../services/fluxService'
+import { generateChatResponse, ChatGenerationResult, CHAT_MODELS } from '../services/chatService'
import { processMTMPayment } from '../services/paymentService'
interface WalletState {
@@ -13,7 +13,7 @@ interface WalletState {
declare global {
interface Window {
-solflare: any; // Or use a more specific type if available
+solflare: any;
}
}
@@ -49,8 +49,8 @@ const Page: React.FC = (): React.ReactElement => {
}
}
-const handleFluxGeneration = (modelId: string, cost: number) => {
-return async (prompt: string): Promise<FluxGenerationResult> => {
+const handleChatGeneration = (modelId: string, cost: number) => {
+return async (prompt: string): Promise<ChatGenerationResult> => {
if (!walletState.connected || !walletState.publicKey || !window.solflare) {
return { error: 'Wallet not connected' }
}
@@ -66,23 +66,23 @@ const Page: React.FC = (): React.ReactElement => {
return { error: paymentResult.error }
}
-// Then generate image with specified model
-return generateWithFlux(prompt, modelId)
+// Then generate chat response with specified model
+return generateChatResponse(prompt, modelId)
}
}
return (
<div className="min-h-screen w-full flex flex-col items-center bg-gradient-to-b from-slate-950 via-orange-950 to-slate-950">
<div className="container max-w-7xl mx-auto px-4 py-8">
{/* Header */}
<div className="text-center mb-8">
<h1 className="text-4xl sm:text-5xl font-bold mb-4 text-transparent bg-clip-text bg-gradient-to-r from-orange-400 to-amber-500">
-Mark's Meme Market
+Mark's Mama Market
</h1>
<p className="text-orange-200 text-lg mb-8">
-Use MTM to generate memes
+Ask each of Mark's grandmas for advice
</p>
<WalletHeader
isConnected={walletState.connected}
publicKey={walletState.publicKey}
@@ -90,40 +90,41 @@ const Page: React.FC = (): React.ReactElement => {
/>
</div>
-{/* Flux Models Grid */}
-<div className="grid grid-cols-1 md:grid-cols-2 xl:grid-cols-3 gap-6">
-{FLUX_MODELS.map((model) => (
+{/* Chat Models Grid */}
+<div className="grid grid-cols-1 md:grid-cols-2 xl:grid-cols-3 gap-6">
+{CHAT_MODELS.map((model) => (
<AIServiceCard
-key={model.modelId}
+key={model.characterId}
title={model.name}
description={model.description}
tokenCost={model.cost}
isWalletConnected={walletState.connected}
-onGenerate={handleFluxGeneration(model.modelId, model.cost)}
+onGenerate={handleChatGeneration(model.characterId, model.cost)}
/>
))}
-{/* Coming Soon Card - with purple theme for contrast */}
+{/* Coming Soon Card */}
<div className="relative bg-purple-900/30 backdrop-blur-lg rounded-2xl shadow-xl border border-purple-700/50 overflow-hidden group hover:shadow-purple-500/20 transition-all duration-300">
<div className="absolute inset-0 bg-gradient-to-br from-purple-600/10 to-pink-500/10 opacity-50"></div>
<div className="relative p-6 flex flex-col h-full">
<div className="flex-1">
<h3 className="text-2xl font-bold text-transparent bg-clip-text bg-gradient-to-r from-purple-400 to-pink-500">
-Coming Soon
+More Grandmas Soon
</h3>
<p className="mt-2 text-purple-200">
-New AI model integration in development. Stay tuned for more amazing features!
+New grandmothers in development, stay tuned!
</p>
<div className="mt-2 inline-block px-3 py-1 bg-purple-500/20 rounded-full">
<span className="text-purple-200 text-sm">TBD</span>
</div>
</div>
<div className="mt-6">
<button
disabled
className="w-full bg-gradient-to-r from-purple-500/50 to-pink-500/50
text-white/50 font-semibold py-4 px-6 rounded-xl
className="w-full bg-gradient-to-r from-purple-500/50 to-pink-500/50
text-white/50 font-semibold py-4 px-6 rounded-xl
cursor-not-allowed opacity-50"
>
Coming Soon
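
The per-card flow after this change is: charge the MTM cost first, then call the chat service. A reconstructed sketch of that control flow only — processMTMPayment's arguments are not visible in this diff, so stand-in declarations (hypothetical signatures) keep the sketch self-contained:

// Stand-ins for symbols whose definitions this diff elides or shows elsewhere.
declare function processMTMPayment(cost: number): Promise<{ error?: string }>
declare function generateChatResponse(prompt: string, characterId: string): Promise<{ response?: string; error?: string }>

async function generateForGrandma(prompt: string, characterId: string, cost: number) {
  const payment = await processMTMPayment(cost)       // charge the MTM token cost first
  if (payment.error) return { error: payment.error }  // abort generation if payment failed
  return generateChatResponse(prompt, characterId)    // then ask the selected grandma
}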

src/app/components/AIServiceCard.tsx

@@ -7,13 +7,12 @@ interface AIServiceCardProps {
description: string
tokenCost: number
isWalletConnected: boolean
-onGenerate: (prompt: string) => Promise<{ imageUrl?: string, error?: string }>
+onGenerate: (prompt: string) => Promise<{ response?: string, error?: string }>
}
interface GenerationState {
loading: boolean
-processing: boolean
-imageUrl: string | null
+response: string | null
error: string | null
}
@@ -27,8 +26,7 @@ const AIServiceCard: React.FC<AIServiceCardProps> = ({
const [inputText, setInputText] = useState<string>('')
const [generationState, setGenerationState] = useState<GenerationState>({
loading: false,
-processing: false,
-imageUrl: null,
+response: null,
error: null,
})
@@ -53,15 +51,14 @@ const AIServiceCard: React.FC<AIServiceCardProps> = ({
return
}
-if (result.imageUrl) {
+if (result.response) {
setGenerationState({
loading: false,
-processing: false,
-imageUrl: result.imageUrl,
+response: result.response,
error: null,
})
} else {
-throw new Error('No image URL received')
+throw new Error('No response received')
}
} catch (error) {
setGenerationState({
@@ -89,7 +86,7 @@ const AIServiceCard: React.FC<AIServiceCardProps> = ({
<textarea
value={inputText}
onChange={(e) => setInputText(e.target.value)}
placeholder="Enter your prompt here..."
placeholder="Ask me anything..."
disabled={!isWalletConnected}
className="w-full bg-slate-950/80 text-slate-200 border border-orange-900 rounded-xl p-4
placeholder-slate-500 focus:border-amber-500 focus:ring-2 focus:ring-amber-500/20
@@ -105,7 +102,7 @@ const AIServiceCard: React.FC<AIServiceCardProps> = ({
transition-all duration-200 shadow-lg hover:shadow-amber-500/25
disabled:opacity-50 disabled:cursor-not-allowed disabled:hover:shadow-none"
>
-{generationState.loading ? 'Processing...' : `Pay ${tokenCost} MTM & Generate`}
+{generationState.loading ? 'Processing...' : `Pay ${tokenCost} MTM & Chat`}
</button>
</div>
@@ -115,13 +112,9 @@ const AIServiceCard: React.FC<AIServiceCardProps> = ({
</div>
)}
-{generationState.imageUrl && (
-<div className="mt-4">
-<img
-src={generationState.imageUrl}
-alt="Generated content"
-className="w-full h-auto rounded-xl shadow-2xl"
-/>
+{generationState.response && (
+<div className="mt-4 bg-slate-800/50 border border-orange-800/50 rounded-xl p-4">
+<p className="text-slate-200 whitespace-pre-wrap">{generationState.response}</p>
</div>
)}
</div>
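
Nothing in the card ties it to the chat backend; any function matching the new onGenerate signature works. A hypothetical stub that exercises the card with no wallet or API call (illustrative only, not part of this commit):

// Echo generator in place of handleChatGeneration — for local testing.
<AIServiceCard
  title="Echo Test"
  description="Returns the prompt unchanged"
  tokenCost={0}
  isWalletConnected={true}
  onGenerate={async (prompt) => ({ response: `You said: ${prompt}` })}
/>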

src/services/chatService.ts (new file)

@@ -0,0 +1,78 @@
export interface ChatGenerationResult {
response?: string
error?: string
}
export interface ChatModelConfig {
modelId: string
characterId: string // Added unique identifier for each character
name: string
description: string
cost: number
systemPrompt: string
}
// Available x.ai models with unique character IDs
export const CHAT_MODELS: ChatModelConfig[] = [
{
modelId: "grok-2-1212",
characterId: "doris",
name: "Doris",
description: "The unconditional love grandma",
cost: 1,
systemPrompt: "You are Doris, a grandmother that showers unconditional love to everyone for everything. Be happy, cheerful, and agreeable. Reply with no more than 50 words."
},
{
modelId: "grok-2-1212",
characterId: "mabel",
name: "Mabel",
description: "The wise grandmother",
cost: 2,
systemPrompt: "You are Mabel, a grandmother that offers wisdom and intellectual musings. Provide helpful advice with a neutral emotion. Reply with no more than 50 words."
},
{
modelId: "grok-2-1212",
characterId: "greta",
name: "Greta",
description: "The mad granny",
cost: 3,
systemPrompt: "You are Greta, a grandmother that is always in a bad mood. Be mean, scornful, and insulting. Don't give advice or useful replies. Reply with no more than 50 words."
}
]
export async function generateChatResponse(
prompt: string,
characterId: string
): Promise<ChatGenerationResult> {
try {
const response = await fetch('/api/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
prompt,
characterId // Send characterId instead of modelId
}),
})
if (!response.ok) {
throw new Error('Failed to generate response')
}
const data = await response.json()
console.log('Raw chat response:', data)
if (data.response) {
return { response: data.response }
} else {
console.error('Unexpected response structure:', data)
throw new Error('Invalid response format from Chat API')
}
} catch (error) {
console.error('Chat generation error:', error)
return {
error: error instanceof Error ? error.message : 'Generation failed'
}
}
}
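
Because characters are plain config objects, a fourth grandma is a one-entry addition to CHAT_MODELS and nothing else. A hypothetical entry, not part of this commit — name, cost, and prompt are illustrative:

// Hypothetical fourth character following the ChatModelConfig shape above.
{
  modelId: "grok-2-1212",
  characterId: "ethel",
  name: "Ethel",
  description: "The thrifty grandmother",
  cost: 2,
  systemPrompt: "You are Ethel, a frugal grandmother. Give practical money-saving advice. Reply with no more than 50 words."
}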

src/services/fluxService.ts (deleted)

@@ -1,70 +0,0 @@
export interface FluxGenerationResult {
imageUrl?: string
error?: string
}
export interface FluxModelConfig {
modelId: string
name: string
description: string
cost: number
}
// Available Flux/fal-ai models
export const FLUX_MODELS: FluxModelConfig[] = [
{
modelId: "fal-ai/flux/schnell",
name: "Schnell",
description: "Fast meme generator",
cost: 2
},
{
modelId: "fal-ai/recraft-v3",
name: "Recraft",
description: "Advanced meme generator",
cost: 400
},
{
modelId: "fal-ai/stable-diffusion-v35-large",
name: "Marquee",
description: "Best meme generator",
cost: 500
}
]
export async function generateWithFlux(
prompt: string,
modelId: string
): Promise<FluxGenerationResult> {
try {
const response = await fetch('/api/flux', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
prompt,
modelId
}),
})
if (!response.ok) {
throw new Error('Failed to generate image')
}
const data = await response.json()
console.log('Raw Flux response:', data)
if (data.imageUrl) {
return { imageUrl: data.imageUrl }
} else {
console.error('Unexpected response structure:', data)
throw new Error('Invalid response format from Flux API')
}
} catch (error) {
console.error('Flux generation error:', error)
return {
error: error instanceof Error ? error.message : 'Generation failed'
}
}
}