# OpenAI chat proxy route (FastAPI).
import asyncio
import logging

from fastapi import APIRouter, Depends, Request
from openai import AsyncOpenAI
from pydantic import BaseModel

from app.api.deps import get_api_key
from app.core.config import settings
from app.core.limiter import limiter
# Router collecting the LLM endpoints defined in this module; mounted by the
# application elsewhere.
router = APIRouter()
class LLMRequest(BaseModel):
    """Request body for the ``/chat`` endpoint."""

    # User prompt forwarded verbatim to the model.
    prompt: str
    # Optional extra context; accepted but not referenced by the visible
    # handler in this module — confirm intended use with the caller.
    context: str = ""
# Build the async OpenAI client only when a real key is configured; the
# endpoint serves mock responses whenever `client` stays None (missing key
# or the unreplaced "your-openai-api-key" placeholder).
client = None
_configured_key = settings.OPENAI_API_KEY
if _configured_key and _configured_key != "your-openai-api-key":
    client = AsyncOpenAI(api_key=_configured_key)
@router.post("/chat")
@limiter.limit(settings.RATE_LIMIT)
async def openai_chat(
    request: Request,
    chat_data: LLMRequest,
    api_key: str = Depends(get_api_key)
):
    """Forward a chat prompt to OpenAI and return the completion.

    Falls back to a canned mock response when no OpenAI client is
    configured (missing or placeholder API key), so the route stays
    usable in development without credentials.

    Args:
        request: Raw request object (required by the rate limiter decorator).
        chat_data: Validated body carrying the user prompt.
        api_key: Caller credential resolved by the ``get_api_key`` dependency.

    Returns:
        dict with ``status`` (``"success"``/``"mock"``/``"error"``),
        ``model``, and either ``response`` or ``detail``.
    """
    # No client configured -> deterministic mock. This branch cannot raise,
    # so it lives outside the try block (keep the try body minimal).
    if not client:
        return {
            "status": "mock",
            "model": "openai",
            "response": f"MOCK: OpenAI response to '{chat_data.prompt}'"
        }

    try:
        # Only the remote call can fail here.
        # NOTE(review): chat_data.context is accepted but never sent
        # upstream — confirm whether it should become a system message.
        response = await client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": chat_data.prompt}]
        )
    except Exception:
        # Boundary handler: log the full traceback server-side, but do NOT
        # echo str(e) to the client — exception text can leak key fragments,
        # hostnames, and library internals.
        logging.getLogger(__name__).exception("OpenAI chat completion failed")
        return {"status": "error", "detail": "Upstream LLM request failed"}

    return {
        "status": "success",
        "model": "openai",
        "response": response.choices[0].message.content
    }