diff --git a/app/api/endpoints/gemini.py b/app/api/endpoints/gemini.py
index e0dfccb..ed04494 100644
--- a/app/api/endpoints/gemini.py
+++ b/app/api/endpoints/gemini.py
@@ -36,11 +36,28 @@ def get_gemini_client():
 
 @limiter.limit(settings.RATE_LIMIT)
 async def gemini_chat(
     request: Request,
-    chat_data: LLMRequest,
     api_key: str = Depends(get_api_key),
     module: Module = Depends(get_current_module),
     db: Session = Depends(get_db)
 ):
+    # Handle text/plain as JSON (fallback for CORS "Simple Requests")
+    content_type = request.headers.get("Content-Type", "")
+    if "text/plain" in content_type:
+        try:
+            body = await request.body()
+            import json
+            data = json.loads(body)
+            chat_data = LLMRequest(**data)
+        except Exception as e:
+            return {"status": "error", "detail": f"Failed to parse text/plain as JSON: {str(e)}"}
+    else:
+        # Standard JSON parsing
+        try:
+            data = await request.json()
+            chat_data = LLMRequest(**data)
+        except Exception as e:
+            return {"status": "error", "detail": f"Invalid JSON: {str(e)}"}
+
     client = get_gemini_client()
     try:
diff --git a/docker-compose.yml b/docker-compose.yml
index 4b67361..6797fe9 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,9 +1,11 @@
 services:
   api:
     build: .
-    container_name: storyline-ai-gateway
+    container_name: ai-gateway
+    networks:
+      - caddy_network
     ports:
-      - "8191:8000"
+      - "8000:8000"
     env_file:
       - .env
     restart: always
@@ -11,3 +13,8 @@
       - .:/app
     # Override command for development/auto-reload if needed
     command: uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload
+
+networks:
+  caddy_network:
+    # Define the network at the bottom
+    external: true
diff --git a/server_log.txt b/server_log.txt
new file mode 100644
index 0000000..6035890
Binary files /dev/null and b/server_log.txt differ