orquestrador/tests/test_llm_service.py

import os
import unittest
from types import SimpleNamespace
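
# DEBUG is pinned before the app import below; presumably the llm_service
# module reads it at import time (an assumption based on this ordering).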
os.environ.setdefault("DEBUG", "false")

from app.services.ai.llm_service import (
    INVALID_RECEIPT_WATERMARK_MESSAGE,
    VALID_RECEIPT_WATERMARK_MARKER,
    LLMService,
)
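

# A note on the pattern used throughout: LLMService.__new__(LLMService) builds a
# bare instance without running __init__, so no real Vertex AI client or model is
# constructed; each test then attaches only the attributes it exercises.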
class LLMServiceResponseParsingTests(unittest.TestCase):
    def test_extract_response_payload_supports_text_and_function_call_in_same_candidate(self):
        service = LLMService.__new__(LLMService)
        response = SimpleNamespace(
            candidates=[
                SimpleNamespace(
                    content=SimpleNamespace(
                        parts=[
                            SimpleNamespace(
                                text="Legal! Buscando carros de ate 70 mil para voce.",
                                function_call=None,
                            ),
                            SimpleNamespace(
                                text=None,
                                function_call=SimpleNamespace(
                                    name="consultar_estoque",
                                    args={"preco_max": 70000.0},
                                ),
                            ),
                        ]
                    )
                )
            ]
        )

        payload = service._extract_response_payload(response)

        self.assertEqual(payload["response"], "Legal! Buscando carros de ate 70 mil para voce.")
        self.assertEqual(
            payload["tool_call"],
            {
                "name": "consultar_estoque",
                "arguments": {"preco_max": 70000.0},
            },
        )

    def test_extract_response_payload_handles_text_only_candidate_without_response_text_accessor(self):
        service = LLMService.__new__(LLMService)
        response = SimpleNamespace(
            candidates=[
                SimpleNamespace(
                    content=SimpleNamespace(
                        parts=[
                            SimpleNamespace(text="Resposta simples", function_call=None),
                        ]
                    )
                )
            ]
        )

        payload = service._extract_response_payload(response)

        self.assertEqual(payload, {"response": "Resposta simples", "tool_call": None})

    def test_extract_response_payload_falls_back_to_response_text_accessor(self):
        service = LLMService.__new__(LLMService)
        response = SimpleNamespace(
            text='{"ok": true}',
            candidates=[
                SimpleNamespace(
                    content=SimpleNamespace(
                        parts=[
                            # Deliberately no .text attribute on this part, forcing
                            # the fallback to the response-level text accessor.
                            SimpleNamespace(function_call=None),
                        ]
                    )
                )
            ]
        )

        payload = service._extract_response_payload(response)

        self.assertEqual(payload, {"response": '{"ok": true}', "tool_call": None})
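

# The watermark contract exercised below, as far as these tests reveal it: the
# prompt instructs the model to prefix its answer with VALID_RECEIPT_WATERMARK_MARKER
# only when the SysaltiIA watermark is visible; _coerce_image_workflow_response then
# strips the marker, or replaces the whole answer with INVALID_RECEIPT_WATERMARK_MESSAGE
# when the marker is absent.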
class LLMServiceImageWorkflowPromptTests(unittest.TestCase):
    def test_build_image_workflow_prompt_preserves_visible_payment_time(self):
        service = LLMService.__new__(LLMService)

        prompt = service._build_image_workflow_prompt(caption="Segue o comprovante")

        self.assertIn(
            "preserve a data e a hora no campo data_pagamento no formato DD/MM/AAAA HH:MM",
            prompt,
        )
        self.assertIn("Nao reduza para somente a data quando a hora estiver visivel.", prompt)
        self.assertIn("marca d'agua exatamente escrita como SysaltiIA", prompt)
        self.assertIn(
            "O comprovante enviado nao e valido. Envie um comprovante valido com a marca d'agua SysaltiIA visivel.",
            prompt,
        )
        self.assertIn(VALID_RECEIPT_WATERMARK_MARKER, prompt)
        self.assertIn("Legenda do usuario: Segue o comprovante", prompt)

    def test_coerce_image_workflow_response_rejects_payment_without_marker(self):
        service = LLMService.__new__(LLMService)

        response = service._coerce_image_workflow_response(
            "Registrar pagamento de aluguel: contrato LOC-20260319-33CD6567; valor R$ 379,80."
        )

        self.assertEqual(response, INVALID_RECEIPT_WATERMARK_MESSAGE)

    def test_coerce_image_workflow_response_strips_valid_watermark_marker(self):
        service = LLMService.__new__(LLMService)

        response = service._coerce_image_workflow_response(
            f"{VALID_RECEIPT_WATERMARK_MARKER} Registrar pagamento de aluguel: contrato LOC-20260319-33CD6567; valor R$ 379,80."
        )

        self.assertEqual(
            response,
            "Registrar pagamento de aluguel: contrato LOC-20260319-33CD6567; valor R$ 379,80.",
        )
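

# Dispatch rule under test: generate_response should call model.generate_content
# directly when the history is empty, and open a chat session via
# model.start_chat(history) / chat.send_message otherwise.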
class LLMServiceDispatchTests(unittest.IsolatedAsyncioTestCase):
    async def test_generate_response_uses_generate_content_when_history_is_empty(self):
        service = LLMService.__new__(LLMService)
        service.model_names = ["gemini-2.5-pro"]
        service._log_llm_event = lambda *args, **kwargs: None
        service.build_vertex_tools = lambda tools: None

        class DummyChat:
            def __init__(self):
                self.calls = []

            def send_message(self, message, **kwargs):
                self.calls.append((message, kwargs))
                return SimpleNamespace(candidates=[])

        class DummyModel:
            def __init__(self):
                self.generate_calls = []
                self.chat = DummyChat()

            def generate_content(self, message, **kwargs):
                self.generate_calls.append((message, kwargs))
                return SimpleNamespace(candidates=[])

            def start_chat(self, history):
                raise AssertionError("should not open a chat when there is no history")

        model = DummyModel()
        service._get_model = lambda model_name: model
        service._extract_response_payload = lambda response: {"response": "ok", "tool_call": None}

        generation_config = {"temperature": 0, "max_output_tokens": 128}
        payload = await service.generate_response(
            message="teste",
            tools=[],
            history=[],
            generation_config=generation_config,
        )

        self.assertEqual(payload, {"response": "ok", "tool_call": None})
        self.assertEqual(
            model.generate_calls,
            [("teste", {"generation_config": generation_config})],
        )

    async def test_generate_response_uses_chat_when_history_is_present(self):
        service = LLMService.__new__(LLMService)
        service.model_names = ["gemini-2.5-pro"]
        service._log_llm_event = lambda *args, **kwargs: None
        service.build_vertex_tools = lambda tools: None

        class DummyChat:
            def __init__(self):
                self.calls = []

            def send_message(self, message, **kwargs):
                self.calls.append((message, kwargs))
                return SimpleNamespace(candidates=[])

        class DummyModel:
            def __init__(self):
                self.chat = DummyChat()
                self.histories = []

            def generate_content(self, message, **kwargs):
                raise AssertionError("should not use generate_content when there is history")

            def start_chat(self, history):
                self.histories.append(history)
                return self.chat

        model = DummyModel()
        service._get_model = lambda model_name: model
        service._extract_response_payload = lambda response: {"response": "ok", "tool_call": None}

        history = [{"role": "user", "parts": ["oi"]}]
        payload = await service.generate_response(message="teste", tools=[], history=history)

        self.assertEqual(payload, {"response": "ok", "tool_call": None})
        self.assertEqual(model.histories, [history])
        self.assertEqual(model.chat.calls, [("teste", {})])
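

if __name__ == "__main__":
    # Convenience entry point so the file can be run directly; under normal
    # test discovery (pytest / python -m unittest) this is not needed.
    unittest.main()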