Update context handling: prepend chat_data.context to the prompt and use the combined prompt_content for both generation and prompt-token estimation

This commit is contained in:
2026-02-10 01:39:07 +08:00
parent 968eb173dd
commit b20af7cbde
2 changed files with 22 additions and 4 deletions

View File

@@ -54,11 +54,15 @@ async def gemini_chat(
"response": response_text
}
prompt_content = chat_data.prompt
if chat_data.context:
prompt_content = f"Context: {chat_data.context}\n\nPrompt: {chat_data.prompt}"
# Using the async generation method provided by the new google-genai library
# We use await to ensure we don't block the event loop
response = await client.aio.models.generate_content(
model="gemini-2.0-flash",
contents=chat_data.prompt
contents=prompt_content
)
# Track usage if valid module
@@ -67,7 +71,7 @@ async def gemini_chat(
# 1 char ~= 0.25 tokens (rough estimate if exact count not returned)
# Gemini response usually has usage_metadata
usage = response.usage_metadata
prompt_tokens = usage.prompt_token_count if usage else len(chat_data.prompt) // 4
prompt_tokens = usage.prompt_token_count if usage else len(prompt_content) // 4
completion_tokens = usage.candidates_token_count if usage else len(response.text) // 4
module.ingress_tokens += prompt_tokens