Files
ai-gateway/app/api/endpoints/storyline.py

34 lines
901 B
Python

from fastapi import APIRouter, Depends, Request
from app.api.deps import get_api_key
from app.core.limiter import limiter
from app.core.config import settings
from pydantic import BaseModel
# Sub-router for storyline endpoints; mounted onto the app elsewhere
# (presumably with a prefix) — TODO confirm against the include_router call.
router = APIRouter()
class ChatRequest(BaseModel):
    """Request body for POST /chat: the user prompt plus optional context."""

    # The user's prompt text (required).
    prompt: str
    # Prior conversation / story context; defaults to empty.
    context: str = ""
@router.post("/chat")
@limiter.limit(settings.RATE_LIMIT)
async def story_chat(
request: Request,
chat_data: ChatRequest,
api_key: str = Depends(get_api_key)
):
# This is where you would call your LLM (OpenAI, Anthropic, etc.)
# For now, we return a mock response
return {
"status": "success",
"response": f"Processed prompt: {chat_data.prompt}",
"metadata": {
"characters_received": len(chat_data.prompt),
"context_length": len(chat_data.context)
}
}
@router.get("/health")
async def health_check():
return {"status": "healthy"}