From 81f654c3fdeb0ac7b706ab8330d276b0c50f7458 Mon Sep 17 00:00:00 2001
From: Paulo Reyes
Date: Tue, 10 Feb 2026 03:28:46 +0800
Subject: [PATCH] new endpoint settings

---
 app/api/endpoints/gemini.py | 24 ++++++++++++++++++++++--
 app/core/prompts.py         |  5 +++++
 2 files changed, 27 insertions(+), 2 deletions(-)
 create mode 100644 app/core/prompts.py

diff --git a/app/api/endpoints/gemini.py b/app/api/endpoints/gemini.py
index da9cb38..e0dfccb 100644
--- a/app/api/endpoints/gemini.py
+++ b/app/api/endpoints/gemini.py
@@ -8,12 +8,20 @@ from app.core.config import settings
 from pydantic import BaseModel
 from google import genai
 import asyncio
+from google.genai import types
+from app.core.prompts import GEMINI_SYSTEM_PROMPT
 
 router = APIRouter()
 
 class LLMRequest(BaseModel):
     prompt: str
     context: str = ""
+    system_prompt: str | None = None
+    knowledge_base: str | None = None
+    temperature: float = 0.7
+    top_p: float = 0.95
+    top_k: int = 40
+    max_output_tokens: int = 8192
 
 # Shared client instance (global)
 _client = None
@@ -58,11 +66,23 @@ async def gemini_chat(
     if chat_data.context:
         prompt_content = f"Context: {chat_data.context}\n\nPrompt: {chat_data.prompt}"
 
+    # Prepare system instruction
+    system_instruction = chat_data.system_prompt or GEMINI_SYSTEM_PROMPT
+    if chat_data.knowledge_base:
+        system_instruction += f"\n\nKnowledge Base:\n{chat_data.knowledge_base}"
+
     # Using the async generation method provided by the new google-genai library
     # We use await to ensure we don't block the event loop
     response = await client.aio.models.generate_content(
-        model="gemini-2.0-flash",
-        contents=prompt_content
+        model="gemini-2.5-flash",
+        contents=prompt_content,
+        config=types.GenerateContentConfig(
+            system_instruction=system_instruction,
+            temperature=chat_data.temperature,
+            top_p=chat_data.top_p,
+            top_k=chat_data.top_k,
+            max_output_tokens=chat_data.max_output_tokens
+        )
     )
 
     # Track usage if valid module
diff --git a/app/core/prompts.py b/app/core/prompts.py
new file mode 100644
index 0000000..c1dcf9b
--- /dev/null
+++ b/app/core/prompts.py
@@ -0,0 +1,5 @@
+
+# System Prompts Configuration
+
+GEMINI_SYSTEM_PROMPT = """You are LDEx AI, a helpful AI assistant.
+Answer the user's questions concisely and accurately based on the knowledge base provided."""