feat(runtime): concluir execução isolada de tools na fase 7

feat/self-evolving-tools-foundation
parent de455b8566
commit 7e380a9c65

@ -1,3 +1,5 @@
import threading
from fastapi import Depends, HTTPException, Request, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from sqlalchemy.orm import Session
@ -24,12 +26,18 @@ from admin_app.services import (
AuditService,
AuthService,
CollaboratorManagementService,
ToolGenerationService,
ToolGenerationWorkerService,
ToolManagementService,
)
from shared.contracts import AdminPermission, StaffRole, permissions_for_role, role_has_permission, role_includes
bearer_scheme = HTTPBearer(auto_error=False)
_tool_generation_worker_lock = threading.Lock()
_tool_generation_worker_service: ToolGenerationWorkerService | None = None
_tool_generation_worker_config: tuple[int, str, str, int, int, float] | None = None
def get_settings(request: Request) -> AdminSettings:
app_settings = getattr(request.app.state, "admin_settings", None)
@ -106,12 +114,49 @@ def get_collaborator_management_service(
)
def get_tool_generation_service(
    settings: AdminSettings = Depends(get_settings),
) -> ToolGenerationService:
    """Instantiate the isolated LLM generation service of the administrative runtime.

    Completely separate from the product's LLMService (app.services.ai.llm_service).
    Uses the admin_tool_generation_model / admin_tool_generation_fallback_model settings.
    Mapped to the tool_generation_runtime_profile of the model_runtime_separation contract.
    """
    return ToolGenerationService(settings)
def get_tool_generation_worker_service(
    settings: AdminSettings = Depends(get_settings),
) -> ToolGenerationWorkerService:
    """Return the process-wide generation worker, rebuilding it when its config changes."""
    global _tool_generation_worker_service, _tool_generation_worker_config

    desired_config = (
        int(settings.admin_tool_generation_worker_max_workers),
        str(settings.admin_tool_generation_model),
        str(settings.admin_tool_generation_fallback_model),
        int(settings.admin_tool_generation_timeout_seconds),
        int(settings.admin_tool_generation_max_output_tokens),
        float(settings.admin_tool_generation_temperature),
    )
    with _tool_generation_worker_lock:
        stale = (
            _tool_generation_worker_service is None
            or _tool_generation_worker_config != desired_config
        )
        if stale:
            if _tool_generation_worker_service is not None:
                # Retire the previous worker before swapping in a fresh one.
                _tool_generation_worker_service.shutdown(wait=False)
            _tool_generation_worker_service = ToolGenerationWorkerService(settings)
            _tool_generation_worker_config = desired_config
        return _tool_generation_worker_service
def get_tool_management_service(
settings: AdminSettings = Depends(get_settings),
draft_repository: ToolDraftRepository = Depends(get_tool_draft_repository),
version_repository: ToolVersionRepository = Depends(get_tool_version_repository),
metadata_repository: ToolMetadataRepository = Depends(get_tool_metadata_repository),
artifact_repository: ToolArtifactRepository = Depends(get_tool_artifact_repository),
tool_generation_service: ToolGenerationService = Depends(get_tool_generation_service),
tool_generation_worker_service: ToolGenerationWorkerService = Depends(get_tool_generation_worker_service),
) -> ToolManagementService:
return ToolManagementService(
settings=settings,
@ -119,6 +164,8 @@ def get_tool_management_service(
version_repository=version_repository,
metadata_repository=metadata_repository,
artifact_repository=artifact_repository,
tool_generation_service=tool_generation_service,
tool_generation_worker_service=tool_generation_worker_service,
)

@ -1,4 +1,4 @@
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi import APIRouter, Depends, HTTPException, status
from admin_app.api.dependencies import (
get_settings,
@ -138,7 +138,7 @@ def panel_tool_pipeline_run(
),
):
try:
payload = service.run_generation_pipeline(
payload = service.run_generation_pipeline_in_worker(
version_id,
runner_staff_account_id=current_staff.id,
runner_name=current_staff.display_name,
@ -374,6 +374,7 @@ def _build_pipeline_response(payload: dict) -> AdminToolGenerationPipelineRespon
steps=payload["steps"],
queue_entry=payload["queue_entry"],
automated_validations=payload.get("automated_validations", []),
execution=payload.get("execution"),
next_steps=payload["next_steps"],
)
@ -412,6 +413,7 @@ def _build_review_detail_response(payload: dict) -> AdminToolReviewDetailRespons
generated_module=payload["generated_module"],
generated_callable=payload["generated_callable"],
generated_source_code=payload["generated_source_code"],
execution=payload.get("execution"),
human_gate=payload["human_gate"],
decision_history=payload["decision_history"],
next_steps=payload["next_steps"],

@ -1,4 +1,4 @@
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi import APIRouter, Depends, HTTPException, status
from admin_app.api.dependencies import (
get_settings,
@ -138,7 +138,7 @@ def tool_pipeline_run(
),
):
try:
payload = service.run_generation_pipeline(
payload = service.run_generation_pipeline_in_worker(
version_id,
runner_staff_account_id=current_staff.id,
runner_name=current_staff.display_name,
@ -374,6 +374,7 @@ def _build_pipeline_response(payload: dict) -> AdminToolGenerationPipelineRespon
steps=payload["steps"],
queue_entry=payload["queue_entry"],
automated_validations=payload.get("automated_validations", []),
execution=payload.get("execution"),
next_steps=payload["next_steps"],
)
@ -412,6 +413,7 @@ def _build_review_detail_response(payload: dict) -> AdminToolReviewDetailRespons
generated_module=payload["generated_module"],
generated_callable=payload["generated_callable"],
generated_source_code=payload["generated_source_code"],
execution=payload.get("execution"),
human_gate=payload["human_gate"],
decision_history=payload["decision_history"],
next_steps=payload["next_steps"],

@ -1,4 +1,4 @@
from datetime import datetime
from datetime import datetime
from pydantic import BaseModel, Field, field_validator
@ -840,6 +840,7 @@ class AdminToolReviewDetailResponse(BaseModel):
generated_module: str
generated_callable: str
generated_source_code: str
execution: AdminToolPipelineExecutionResponse | None = None
human_gate: AdminToolReviewHumanGateResponse
decision_history: list[AdminToolReviewHistoryEntryResponse] = Field(default_factory=list)
next_steps: list[str] = Field(default_factory=list)
@ -908,6 +909,21 @@ class AdminToolPipelineStepResponse(BaseModel):
description: str
class AdminToolPipelineExecutionResponse(BaseModel):
    # Telemetry snapshot of a pipeline run dispatched to the dedicated
    # tool-generation worker (synchronous or async dispatch path).
    mode: str  # e.g. "dedicated_generation_worker" / "dedicated_generation_worker_async"
    target: str  # worker identifier, e.g. "admin_tool_generation_worker"
    dispatch_state: str | None = None  # queued | running | completed | failed
    worker_max_workers: int | None = None  # thread-pool size of the worker
    worker_pending_jobs: int | None = None  # jobs still pending at snapshot time
    queued_jobs_before_submit: int | None = None  # queue depth when this job was submitted
    submitted_at: str | None = None  # ISO-8601 UTC timestamp of submission
    started_at: str | None = None  # ISO-8601 UTC timestamp when the worker picked it up
    completed_at: str | None = None  # ISO-8601 UTC timestamp of completion/failure
    elapsed_ms: float | None = None  # wall-clock duration of the job
    worker_thread_name: str | None = None  # thread that executed the job
    poll_after_ms: int | None = None  # suggested client polling delay while queued/running
    last_error: str | None = None  # "ExceptionType: message" when the dispatch failed
class AdminToolGenerationPipelineResponse(BaseModel):
service: str
message: str
@ -919,6 +935,7 @@ class AdminToolGenerationPipelineResponse(BaseModel):
steps: list[AdminToolPipelineStepResponse]
queue_entry: AdminToolReviewQueueEntryResponse
automated_validations: list[AdminToolAutomatedValidationResponse] = Field(default_factory=list)
execution: AdminToolPipelineExecutionResponse | None = None
next_steps: list[str]
@ -945,6 +962,7 @@ class AdminToolDraftIntakeRequest(BaseModel):
domain: str = Field(min_length=3, max_length=40)
description: str = Field(min_length=16, max_length=280)
business_goal: str = Field(min_length=12, max_length=280)
generation_model: str | None = Field(default=None, max_length=120)
parameters: list[AdminToolDraftIntakeParameterRequest] = Field(default_factory=list, max_length=10)
@field_validator("tool_name")
@ -962,6 +980,14 @@ class AdminToolDraftIntakeRequest(BaseModel):
def normalize_domain(cls, value: str) -> str:
return value.strip().lower()
@field_validator("generation_model", mode="before")
@classmethod
def normalize_generation_model(cls, value: str | None) -> str | None:
if value is None:
return None
normalized = value.strip()
return normalized or None
class AdminToolDraftSubmissionPolicyResponse(BaseModel):
mode: str
@ -994,6 +1020,7 @@ class AdminToolDraftIntakePreviewResponse(BaseModel):
version_count: int = Field(ge=1)
parameter_count: int
required_parameter_count: int
generation_model: str | None = None
requires_director_approval: bool
owner_name: str | None = None
parameters: list[AdminToolDraftIntakePreviewParameterResponse]

@ -43,6 +43,16 @@ class AdminSettings(BaseSettings):
admin_bootstrap_password: str | None = None
admin_bootstrap_role: str = "diretor"
# ---- Runtime de geração de tools (separado do runtime de atendimento) ----
# Mapeado ao tool_generation_runtime_profile do contrato shared/contracts/model_runtime_separation.py.
# Nunca compartilhar estes valores com o runtime de atendimento do product.
admin_tool_generation_model: str = "gemini-3-pro-preview"
admin_tool_generation_fallback_model: str = "gemini-2.5-pro"
admin_tool_generation_timeout_seconds: int = 120
admin_tool_generation_max_output_tokens: int = 8192
admin_tool_generation_temperature: float = 0.2
admin_tool_generation_worker_max_workers: int = 1
@field_validator("admin_debug", mode="before")
@classmethod
def parse_debug_aliases(cls, value):

@ -22,6 +22,18 @@ def _ensure_admin_schema_evolution() -> None:
statements.append("ALTER TABLE tool_drafts ADD COLUMN current_version_number INT NOT NULL DEFAULT 1")
if "version_count" not in tool_draft_columns:
statements.append("ALTER TABLE tool_drafts ADD COLUMN version_count INT NOT NULL DEFAULT 1")
if "generation_model" not in tool_draft_columns:
statements.append("ALTER TABLE tool_drafts ADD COLUMN generation_model VARCHAR(120)")
if statements:
with admin_engine.begin() as connection:
for statement in statements:
connection.execute(text(statement))
if "tool_versions" in table_names:
tool_version_columns = {column["name"] for column in inspector.get_columns("tool_versions")}
statements = []
if "generation_model" not in tool_version_columns:
statements.append("ALTER TABLE tool_versions ADD COLUMN generation_model VARCHAR(120)")
if statements:
with admin_engine.begin() as connection:
for statement in statements:

@ -55,6 +55,7 @@ class ToolDraft(AdminTimestampedModel):
nullable=False,
default=True,
)
generation_model: Mapped[str | None] = mapped_column(String(120), nullable=True)
owner_staff_account_id: Mapped[int] = mapped_column(
Integer,
ForeignKey("staff_accounts.id"),

@ -44,6 +44,7 @@ class ToolVersion(AdminTimestampedModel):
nullable=False,
default=True,
)
generation_model: Mapped[str | None] = mapped_column(String(120), nullable=True)
owner_staff_account_id: Mapped[int] = mapped_column(
Integer,
ForeignKey("staff_accounts.id"),

@ -42,6 +42,7 @@ class ToolDraftRepository(BaseRepository):
version_count: int,
owner_staff_account_id: int,
owner_display_name: str,
generation_model: str | None = None,
requires_director_approval: bool = True,
commit: bool = True,
) -> ToolDraft:
@ -58,6 +59,7 @@ class ToolDraftRepository(BaseRepository):
required_parameter_count=required_parameter_count,
current_version_number=current_version_number,
version_count=version_count,
generation_model=generation_model,
requires_director_approval=requires_director_approval,
owner_staff_account_id=owner_staff_account_id,
owner_display_name=owner_display_name,
@ -85,6 +87,7 @@ class ToolDraftRepository(BaseRepository):
version_count: int,
owner_staff_account_id: int,
owner_display_name: str,
generation_model: str | None = None,
requires_director_approval: bool = True,
commit: bool = True,
) -> ToolDraft:
@ -98,6 +101,7 @@ class ToolDraftRepository(BaseRepository):
draft.required_parameter_count = required_parameter_count
draft.current_version_number = current_version_number
draft.version_count = version_count
draft.generation_model = generation_model
draft.requires_director_approval = requires_director_approval
draft.owner_staff_account_id = owner_staff_account_id
draft.owner_display_name = owner_display_name

@ -54,6 +54,7 @@ class ToolVersionRepository(BaseRepository):
required_parameter_count: int,
owner_staff_account_id: int,
owner_display_name: str,
generation_model: str | None = None,
status: ToolLifecycleStatus = ToolLifecycleStatus.DRAFT,
requires_director_approval: bool = True,
commit: bool = True,
@ -69,6 +70,7 @@ class ToolVersionRepository(BaseRepository):
business_goal=business_goal,
parameters_json=parameters_json,
required_parameter_count=required_parameter_count,
generation_model=generation_model,
requires_director_approval=requires_director_approval,
owner_staff_account_id=owner_staff_account_id,
owner_display_name=owner_display_name,

@ -7,6 +7,8 @@ from admin_app.services.auth_service import AuthService
from admin_app.services.collaborator_management_service import CollaboratorManagementService
from admin_app.services.report_service import ReportService
from admin_app.services.system_service import SystemService
from admin_app.services.tool_generation_service import ToolGenerationService
from admin_app.services.tool_generation_worker_service import ToolGenerationWorkerService
from admin_app.services.tool_management_service import ToolManagementService
__all__ = [
@ -17,5 +19,7 @@ __all__ = [
"CollaboratorManagementService",
"ReportService",
"SystemService",
"ToolGenerationService",
"ToolGenerationWorkerService",
"ToolManagementService",
]

@ -0,0 +1,444 @@
"""Serviço isolado de geração de tools via LLM para o runtime administrativo.
Este módulo é a única camada do admin_app que conversa com o Vertex AI para fins
de geração de código. Ele é completamente separado do LLMService do product
(app.services.ai.llm_service) e usa configurações próprias do AdminSettings.
Separação arquitetural garantida por:
- shared.contracts.model_runtime_separation.ModelRuntimeTarget.TOOL_GENERATION
- config keys: admin_tool_generation_model / admin_tool_generation_fallback_model
- Nenhuma importação de app.* é permitida neste módulo.
"""
from __future__ import annotations
import logging
import re
from time import perf_counter
from typing import Any
import vertexai
from google.api_core.exceptions import GoogleAPIError, NotFound
from vertexai.generative_models import GenerationConfig, GenerativeModel
from admin_app.core.settings import AdminSettings
logger = logging.getLogger(__name__)

# ---- Generation constants ----------------------------------------------------

# Matches the first fenced ```python ... ``` block in a model response.
_PYTHON_BLOCK_RE = re.compile(
    r"```python\s*\n(.*?)```",
    re.DOTALL | re.IGNORECASE,
)

# Patterns the generated code must not contain.
# Applied before the automated validations already present in ToolManagementService.
_DANGEROUS_PATTERNS: tuple[tuple[str, str], ...] = (
    (r"\bexec\s*\(", "uso de exec() proibido em tools geradas"),
    (r"\beval\s*\(", "uso de eval() proibido em tools geradas"),
    (r"\b__import__\s*\(", "uso de __import__() proibido em tools geradas"),
    (r"os\.system\s*\(", "chamada a os.system() proibida em tools geradas"),
    (r"os\.popen\s*\(", "chamada a os.popen() proibida em tools geradas"),
    (r"\bsubprocess\b", "uso de subprocess proibido em tools geradas"),
    (r"from\s+app\.", "importação de app.* proibida em tools geradas"),
    (r"from\s+admin_app\.", "importação de admin_app.* proibida em tools geradas"),
    (r"import\s+app\b", "importação direta de app proibida em tools geradas"),
    (r"import\s+admin_app\b", "importação direta de admin_app proibida em tools geradas"),
    (r"\bopen\s*\(", "acesso a sistema de arquivos via open() proibido em tools geradas"),
    (r"__builtins__", "acesso a __builtins__ proibido em tools geradas"),
)

# Maps a parameter type name to a readable Python type annotation.
_TYPE_ANNOTATION_MAP: dict[str, str] = {
    "string": "str",
    "integer": "int",
    "number": "float",
    "boolean": "bool",
    "object": "dict",
    "array": "list",
}

# Cache of instantiated Vertex AI models (keyed by model name).
_MODEL_CACHE: dict[str, GenerativeModel] = {}

# SDK initialization guard (avoids re-initializing per service instance).
_VERTEX_INITIALIZED: bool = False
class ToolGenerationService:
    """Generates tool implementations via Vertex AI in the administrative context.

    Responsibilities:
    - Build a structured prompt from the tool's metadata
    - Call the generation LLM (separate from the customer-service model)
    - Extract the Python code block from the model response
    - Apply safety linting before returning the code
    - Return a structured result for the ToolManagementService

    Out of scope:
    - Persisting artifacts (ToolManagementService's responsibility)
    - Contract/signature validation (ToolManagementService's responsibility)
    - Executing the generated code
    """

    def __init__(self, settings: AdminSettings) -> None:
        self.settings = settings
        self._ensure_vertex_initialized()

    def _ensure_vertex_initialized(self) -> None:
        """Initialize the Vertex AI SDK once per process (best effort)."""
        global _VERTEX_INITIALIZED
        if _VERTEX_INITIALIZED:
            return
        # Reuses the Google project credentials already configured in the admin
        # settings (read from .env, identical to the product). The isolation is
        # in the model/temperature parameters — not in the GCP account.
        try:
            import os

            project_id = os.environ.get("GOOGLE_PROJECT_ID", "")
            location = os.environ.get("GOOGLE_LOCATION", "us-central1")
            vertexai.init(project=project_id, location=location)
            _VERTEX_INITIALIZED = True
            logger.info(
                "tool_generation_service_event=vertex_initialized project=%s location=%s",
                project_id,
                location,
            )
        except Exception as exc:
            # Non-fatal: generation calls will surface the real error later
            # if the SDK is actually unusable.
            logger.warning(
                "tool_generation_service_event=vertex_init_warning error=%s",
                exc,
            )

    def _get_model(self, model_name: str) -> GenerativeModel:
        """Return a cached GenerativeModel handle, creating it on first use."""
        model = _MODEL_CACHE.get(model_name)
        if model is None:
            model = GenerativeModel(model_name)
            _MODEL_CACHE[model_name] = model
        return model

    def _build_model_sequence(self, preferred_model: str | None) -> list[str]:
        """Build the ordered, de-duplicated model sequence to try.

        Order: caller-preferred model first (when given), then the configured
        primary model, then the configured fallback model.
        """
        sequence: list[str] = []
        candidates = [
            preferred_model,
            self.settings.admin_tool_generation_model,
            self.settings.admin_tool_generation_fallback_model,
        ]
        for candidate in candidates:
            normalized = str(candidate or "").strip()
            if normalized and normalized not in sequence:
                sequence.append(normalized)
        return sequence

    def _build_generation_prompt(
        self,
        *,
        tool_name: str,
        display_name: str,
        domain: str,
        description: str,
        business_goal: str,
        parameters: list[dict],
    ) -> str:
        """Assemble the structured generation prompt sent to the model.

        The prompt describes the expected contract, the import restrictions,
        the parameters and the operational goal of the tool.
        """
        signature_parts: list[str] = []
        parameter_lines: list[str] = []
        for param in parameters:
            name = str(param.get("name") or "").strip().lower()
            if not name:
                continue
            param_type = str(param.get("parameter_type") or "string").strip().lower()
            description_param = str(param.get("description") or "").strip()
            required = bool(param.get("required", True))
            annotation = _TYPE_ANNOTATION_MAP.get(param_type, "str")
            if required:
                signature_parts.append(f"{name}: {annotation}")
            else:
                signature_parts.append(f"{name}: {annotation} | None = None")
            required_label = "obrigatório" if required else "opcional"
            parameter_lines.append(
                f"  - {name} ({annotation}, {required_label}): {description_param}"
            )
        signature = ", ".join(signature_parts)
        if signature:
            full_signature = f"async def run(*, {signature}) -> dict:"
        else:
            full_signature = "async def run() -> dict:"
        parameters_block = (
            "\n".join(parameter_lines)
            if parameter_lines
            else "  (nenhum parâmetro — a tool não recebe entrada contextual)"
        )
        # Domain-specific context paragraphs; unknown domains fall back to a
        # generic attendant description.
        domain_context_map = {
            "vendas": (
                "O bot atua em um sistema de atendimento para concessionária automotiva. "
                "A tool opera no domínio de vendas: estoque de veículos, negociações, pedidos e cancelamentos."
            ),
            "revisao": (
                "O bot atua em um sistema de atendimento de oficina automotiva. "
                "A tool opera no domínio de revisão: agendamentos, remarcações, listagem de serviços."
            ),
            "locacao": (
                "O bot atua em um sistema de atendimento de locadora de veículos. "
                "A tool opera no domínio de locação: frota, contratos, pagamentos e devoluções."
            ),
            "orquestracao": (
                "O bot atua em um sistema de orquestração conversacional. "
                "A tool opera no domínio de orquestração: controla fluxo, contexto e estado da conversa."
            ),
        }
        domain_context = domain_context_map.get(
            str(domain or "").strip().lower(),
            "O bot atua em um sistema de atendimento automatizado.",
        )
        return (
            "Você é um especialista em Python que gera implementações realistas de tools "
            "para um bot de atendimento.\n\n"
            f"CONTEXTO DO DOMÍNIO:\n{domain_context}\n\n"
            "CONTRATO OBRIGATÓRIO:\n"
            "- A função deve ser assíncrona: async def run(...)\n"
            "- Todos os parâmetros devem ser keyword-only (após *)\n"
            "- O tipo de retorno deve ser dict (JSON-serializável)\n"
            "- O módulo pode importar apenas stdlib (datetime, json, re, math, uuid, etc.)\n"
            "- Proibido importar: app.*, admin_app.*, subprocess, os.system, os.popen\n"
            "- Proibido usar: exec(), eval(), __import__(), open()\n\n"
            "TOOL A IMPLEMENTAR:\n"
            f"- Nome técnico: {tool_name}\n"
            f"- Nome de exibição: {display_name}\n"
            f"- Domínio: {domain}\n"
            f"- Descrição funcional: {description}\n"
            f"- Objetivo de negócio: {business_goal}\n\n"
            f"PARÂMETROS DA TOOL:\n{parameters_block}\n\n"
            f"ASSINATURA ESPERADA:\n{full_signature}\n\n"
            "INSTRUÇÕES DE GERAÇÃO:\n"
            "- Gere uma implementação realista que simule o comportamento esperado da tool.\n"
            "- O retorno deve incluir os campos relevantes ao domínio (não apenas echo dos argumentos).\n"
            "- Use dados fictícios mas verossímeis para simular a resposta operacional.\n"
            "- Nenhuma explicação ou comentário fora do código. Retorne apenas o bloco Python.\n"
            "- O módulo deve começar com um docstring descritivo.\n"
            "- Envolva o código em ```python ... ```.\n"
        )

    def _extract_python_block(self, raw_response: str) -> str | None:
        """Extract the first ```python ... ``` block from the model response."""
        normalized = str(raw_response or "").strip()
        match = _PYTHON_BLOCK_RE.search(normalized)
        if match:
            return match.group(1).strip()
        # Fallback: no code fence, but the content already looks like Python.
        if normalized.startswith("async def run") or normalized.startswith('"""'):
            return normalized
        return None

    def _extract_response_text(self, response: Any) -> str | None:
        """Collect the text parts of a Vertex AI response; None when empty.

        Walks candidates[0].content.parts defensively (the SDK may omit any
        of these attributes) and falls back to the aggregated ``.text``.
        """
        candidate = (
            response.candidates[0]
            if getattr(response, "candidates", None)
            else None
        )
        content = getattr(candidate, "content", None)
        parts = list(getattr(content, "parts", None) or [])
        text_parts = [
            getattr(part, "text", None)
            for part in parts
            if isinstance(getattr(part, "text", None), str)
        ]
        merged = "\n".join(t for t in text_parts if t and t.strip()).strip() or None
        if merged is not None:
            return merged
        # Fallback to the SDK's root .text attribute (may raise when blocked).
        try:
            return str(response.text or "").strip() or None
        except (AttributeError, ValueError):
            return None

    def _apply_safety_linting(self, source_code: str) -> list[str]:
        """Check the generated code for dangerous patterns before formal validation.

        Returns a list of issues. Empty list = linting passed.
        """
        issues: list[str] = []
        for pattern, description in _DANGEROUS_PATTERNS:
            if re.search(pattern, source_code, re.MULTILINE):
                issues.append(f"linting: {description}.")
        return issues

    async def generate_tool_source(
        self,
        *,
        tool_name: str,
        display_name: str,
        domain: str,
        description: str,
        business_goal: str,
        parameters: list[dict],
        preferred_model: str | None = None,
    ) -> dict[str, Any]:
        """Generate the tool's Python source code from the draft metadata.

        Returns a dict with:
        - passed (bool): True if code was generated and passed linting
        - generated_source_code (str | None): generated Python code
        - generation_model_used (str | None): model that produced the code
        - prompt_rendered (str): prompt sent to the model (for auditing)
        - issues (list[str]): problems found (generation or linting)
        - elapsed_ms (float): total generation time in milliseconds
        """
        import asyncio

        prompt = self._build_generation_prompt(
            tool_name=tool_name,
            display_name=display_name,
            domain=domain,
            description=description,
            business_goal=business_goal,
            parameters=parameters,
        )
        model_sequence = self._build_model_sequence(preferred_model)
        generation_config = GenerationConfig(
            temperature=self.settings.admin_tool_generation_temperature,
            max_output_tokens=self.settings.admin_tool_generation_max_output_tokens,
        )
        raw_response: str | None = None
        generation_model_used: str | None = None
        last_error: Exception | None = None
        started_at = perf_counter()
        for model_name in model_sequence:
            try:
                model = self._get_model(model_name)
                # The blocking SDK call runs in a thread so the event loop can
                # enforce the configured timeout.
                response = await asyncio.wait_for(
                    asyncio.to_thread(
                        model.generate_content,
                        prompt,
                        generation_config=generation_config,
                    ),
                    timeout=float(self.settings.admin_tool_generation_timeout_seconds),
                )
                raw_response = self._extract_response_text(response)
                if raw_response is None:
                    # BUG FIX: an empty/contentless response previously broke
                    # out of the loop, so the fallback model was never tried.
                    # Record the failure and move on to the next candidate.
                    last_error = ValueError(
                        f"modelo '{model_name}' retornou resposta vazia."
                    )
                    logger.warning(
                        "tool_generation_service_event=empty_response model=%s",
                        model_name,
                    )
                    continue
                generation_model_used = model_name
                break
            except asyncio.TimeoutError:
                last_error = TimeoutError(
                    f"modelo '{model_name}' excedeu o timeout de "
                    f"{self.settings.admin_tool_generation_timeout_seconds}s para geração de tools."
                )
                logger.warning(
                    "tool_generation_service_event=timeout model=%s timeout_seconds=%s",
                    model_name,
                    self.settings.admin_tool_generation_timeout_seconds,
                )
                continue
            except NotFound as exc:
                last_error = exc
                # Drop the stale handle so a later retry re-resolves the model.
                _MODEL_CACHE.pop(model_name, None)
                logger.warning(
                    "tool_generation_service_event=model_not_found model=%s error=%s",
                    model_name,
                    exc,
                )
                continue
            except GoogleAPIError as exc:
                last_error = exc
                logger.warning(
                    "tool_generation_service_event=api_error model=%s error=%s",
                    model_name,
                    exc,
                )
                continue
            except Exception as exc:
                last_error = exc
                logger.warning(
                    "tool_generation_service_event=unexpected_error model=%s error=%s class=%s",
                    model_name,
                    exc,
                    exc.__class__.__name__,
                )
                continue
        elapsed_ms = round((perf_counter() - started_at) * 1000, 2)
        if raw_response is None or generation_model_used is None:
            error_detail = str(last_error) if last_error else "nenhum modelo disponivel respondeu"
            logger.error(
                "tool_generation_service_event=generation_failed tool_name=%s elapsed_ms=%s error=%s",
                tool_name,
                elapsed_ms,
                error_detail,
            )
            return {
                "passed": False,
                "generated_source_code": None,
                "generation_model_used": None,
                "prompt_rendered": prompt,
                "issues": [f"falha na geração via LLM: {error_detail}"],
                "elapsed_ms": elapsed_ms,
            }
        generated_source_code = self._extract_python_block(raw_response)
        if generated_source_code is None:
            logger.warning(
                "tool_generation_service_event=no_code_block tool_name=%s model=%s elapsed_ms=%s",
                tool_name,
                generation_model_used,
                elapsed_ms,
            )
            return {
                "passed": False,
                "generated_source_code": None,
                "generation_model_used": generation_model_used,
                "prompt_rendered": prompt,
                "issues": ["o modelo não retornou um bloco de código Python identificável."],
                "elapsed_ms": elapsed_ms,
            }
        linting_issues = self._apply_safety_linting(generated_source_code)
        if linting_issues:
            logger.warning(
                "tool_generation_service_event=linting_failed tool_name=%s model=%s issues=%s elapsed_ms=%s",
                tool_name,
                generation_model_used,
                linting_issues,
                elapsed_ms,
            )
            return {
                "passed": False,
                "generated_source_code": generated_source_code,
                "generation_model_used": generation_model_used,
                "prompt_rendered": prompt,
                "issues": linting_issues,
                "elapsed_ms": elapsed_ms,
            }
        logger.info(
            "tool_generation_service_event=generation_succeeded tool_name=%s model=%s elapsed_ms=%s",
            tool_name,
            generation_model_used,
            elapsed_ms,
        )
        return {
            "passed": True,
            "generated_source_code": generated_source_code,
            "generation_model_used": generation_model_used,
            "prompt_rendered": prompt,
            "issues": [],
            "elapsed_ms": elapsed_ms,
        }

@ -0,0 +1,266 @@
from __future__ import annotations
import threading
from concurrent.futures import ThreadPoolExecutor
from datetime import UTC, datetime
from time import perf_counter
from typing import Any
from admin_app.core.settings import AdminSettings
from admin_app.db.database import AdminSessionLocal
from admin_app.repositories import (
ToolArtifactRepository,
ToolDraftRepository,
ToolMetadataRepository,
ToolVersionRepository,
)
from admin_app.services.tool_generation_service import ToolGenerationService
class ToolGenerationWorkerService:
"""Executa a pipeline de geracao em um worker dedicado do runtime admin.
O worker abre a propria sessao administrativa e cria uma instancia isolada do
ToolManagementService dentro da thread dedicada. Assim, a geracao e as
validacoes nao compartilham a sessao SQLAlchemy da request web nem o pool de
threads padrao usado pelas rotas sync do FastAPI.
"""
_THREAD_NAME_PREFIX = "admin-tool-generation-worker"
_DEFAULT_POLL_AFTER_MS = 1200
    def __init__(self, settings: AdminSettings) -> None:
        """Create the dedicated thread pool sized from the admin settings."""
        self.settings = settings
        # Guarantee at least one worker thread even if the setting is <= 0.
        self.max_workers = max(1, int(settings.admin_tool_generation_worker_max_workers))
        self._executor = ThreadPoolExecutor(
            max_workers=self.max_workers,
            thread_name_prefix=self._THREAD_NAME_PREFIX,
        )
        # Guards _pending_jobs and _jobs across request and worker threads.
        self._lock = threading.Lock()
        self._pending_jobs = 0
        # Per-version job tracking for the async dispatch path.
        self._jobs: dict[str, dict[str, Any]] = {}
    def shutdown(self, *, wait: bool = False) -> None:
        """Stop the executor; jobs not yet started are cancelled."""
        self._executor.shutdown(wait=wait, cancel_futures=True)
    def execute_generation_pipeline(
        self,
        *,
        version_id: str,
        runner_staff_account_id: int,
        runner_name: str,
        runner_role,
    ) -> dict[str, Any]:
        """Run the generation pipeline synchronously on the dedicated worker pool.

        Blocks the caller until the job finishes, then returns the pipeline
        payload enriched with an ``execution`` telemetry dict.
        """
        submitted_at = datetime.now(UTC).isoformat()
        with self._lock:
            self._pending_jobs += 1
            # Jobs already counted before this one was added.
            queued_jobs_before_submit = max(self._pending_jobs - 1, 0)
        started_at = perf_counter()
        future = self._executor.submit(
            self._run_generation_pipeline_job,
            version_id,
            runner_staff_account_id,
            runner_name,
            runner_role,
        )
        try:
            # Block until the dedicated worker thread finishes the job.
            payload = future.result()
        finally:
            # Always release the pending slot, even when the job raised.
            with self._lock:
                self._pending_jobs = max(self._pending_jobs - 1, 0)
                pending_jobs_after_completion = self._pending_jobs
        execution = {
            "mode": "dedicated_generation_worker",
            "target": "admin_tool_generation_worker",
            "dispatch_state": "completed",
            "worker_max_workers": self.max_workers,
            "worker_pending_jobs": pending_jobs_after_completion,
            "queued_jobs_before_submit": queued_jobs_before_submit,
            "submitted_at": submitted_at,
            # NOTE(review): started_at mirrors submitted_at here — the actual
            # worker start time is not captured on this synchronous path.
            "started_at": submitted_at,
            "completed_at": datetime.now(UTC).isoformat(),
            "elapsed_ms": round((perf_counter() - started_at) * 1000, 2),
            # The worker job stashes its thread name under a private key;
            # pop it so it does not leak into the outgoing payload.
            "worker_thread_name": str(payload.pop("_worker_thread_name", "")) or None,
            "poll_after_ms": None,
            "last_error": None,
        }
        enriched_payload = dict(payload)
        enriched_payload["execution"] = execution
        return enriched_payload
    def dispatch_generation_pipeline(
        self,
        *,
        version_id: str,
        runner_staff_account_id: int,
        runner_name: str,
        runner_role,
    ) -> dict[str, Any]:
        """Queue the generation pipeline asynchronously; return a dispatch snapshot.

        Idempotent per version: if a job for the same version is already queued
        or running, the existing job's snapshot is returned instead of queuing
        a duplicate.
        """
        normalized_version_id = str(version_id or "").strip().lower()
        if not normalized_version_id:
            raise ValueError("Versao administrativa invalida para o worker de geracao.")
        with self._lock:
            existing_job = self._jobs.get(normalized_version_id)
            # Re-entry guard: never enqueue twice for the same version.
            if existing_job is not None and existing_job.get("dispatch_state") in {"queued", "running"}:
                return self._build_dispatch_snapshot_locked(existing_job)
            self._pending_jobs += 1
            queued_jobs_before_submit = max(self._pending_jobs - 1, 0)
            job = {
                "version_id": normalized_version_id,
                "dispatch_state": "queued",
                "queued_jobs_before_submit": queued_jobs_before_submit,
                "submitted_at": datetime.now(UTC).isoformat(),
                "started_at": None,
                "completed_at": None,
                "elapsed_ms": None,
                "worker_thread_name": None,
                "last_error": None,
                "result_payload": None,
            }
            self._jobs[normalized_version_id] = job
            self._executor.submit(
                self._run_generation_pipeline_job_async,
                normalized_version_id,
                runner_staff_account_id,
                runner_name,
                runner_role,
            )
            # Snapshot is built while still holding the lock, per the
            # "_locked" naming convention of the helper.
            return self._build_dispatch_snapshot_locked(job)
def get_generation_pipeline_dispatch(self, version_id: str) -> dict[str, Any] | None:
normalized_version_id = str(version_id or "").strip().lower()
if not normalized_version_id:
return None
with self._lock:
job = self._jobs.get(normalized_version_id)
if job is None:
return None
return self._build_dispatch_snapshot_locked(job)
def _run_generation_pipeline_job_async(
self,
version_id: str,
runner_staff_account_id: int,
runner_name: str,
runner_role,
) -> None:
self._mark_job_running(version_id)
try:
payload = self._run_generation_pipeline_job(
version_id,
runner_staff_account_id,
runner_name,
runner_role,
)
except Exception as exc:
self._mark_job_failed(version_id, exc)
return
self._mark_job_completed(version_id, payload)
def _mark_job_running(self, version_id: str) -> None:
with self._lock:
job = self._jobs.get(version_id)
if job is None:
return
job["dispatch_state"] = "running"
job["started_at"] = datetime.now(UTC).isoformat()
job["worker_thread_name"] = threading.current_thread().name
def _mark_job_completed(self, version_id: str, payload: dict[str, Any]) -> None:
with self._lock:
job = self._jobs.get(version_id)
if job is None:
return
completed_at = datetime.now(UTC).isoformat()
started_reference = self._parse_job_timestamp(job.get("started_at")) or self._parse_job_timestamp(job.get("submitted_at"))
elapsed_ms = None
if started_reference is not None:
elapsed_ms = round((datetime.now(UTC) - started_reference).total_seconds() * 1000, 2)
job["dispatch_state"] = "completed"
job["completed_at"] = completed_at
job["elapsed_ms"] = elapsed_ms
job["result_payload"] = dict(payload)
job["last_error"] = None
self._pending_jobs = max(self._pending_jobs - 1, 0)
def _mark_job_failed(self, version_id: str, exc: Exception) -> None:
with self._lock:
job = self._jobs.get(version_id)
if job is None:
return
completed_at = datetime.now(UTC).isoformat()
started_reference = self._parse_job_timestamp(job.get("started_at")) or self._parse_job_timestamp(job.get("submitted_at"))
elapsed_ms = None
if started_reference is not None:
elapsed_ms = round((datetime.now(UTC) - started_reference).total_seconds() * 1000, 2)
job["dispatch_state"] = "failed"
job["completed_at"] = completed_at
job["elapsed_ms"] = elapsed_ms
job["last_error"] = f"{type(exc).__name__}: {exc}"
self._pending_jobs = max(self._pending_jobs - 1, 0)
def _build_dispatch_snapshot_locked(self, job: dict[str, Any]) -> dict[str, Any]:
dispatch_state = str(job.get("dispatch_state") or "queued")
snapshot = {
"mode": "dedicated_generation_worker_async",
"target": "admin_tool_generation_worker",
"dispatch_state": dispatch_state,
"worker_max_workers": self.max_workers,
"worker_pending_jobs": self._pending_jobs,
"queued_jobs_before_submit": job.get("queued_jobs_before_submit", 0),
"submitted_at": job.get("submitted_at"),
"started_at": job.get("started_at"),
"completed_at": job.get("completed_at"),
"elapsed_ms": job.get("elapsed_ms"),
"worker_thread_name": job.get("worker_thread_name"),
"poll_after_ms": self._DEFAULT_POLL_AFTER_MS if dispatch_state in {"queued", "running"} else None,
"last_error": job.get("last_error"),
}
result_payload = job.get("result_payload")
if isinstance(result_payload, dict):
snapshot["result_payload"] = dict(result_payload)
return snapshot
@staticmethod
def _parse_job_timestamp(value: Any) -> datetime | None:
if not isinstance(value, str) or not value.strip():
return None
try:
return datetime.fromisoformat(value)
except ValueError:
return None
def _run_generation_pipeline_job(
    self,
    version_id: str,
    runner_staff_account_id: int,
    runner_name: str,
    runner_role,
) -> dict[str, Any]:
    """Execute the generation pipeline with a DB session owned by this thread.

    A fresh ``ToolManagementService`` is assembled around a dedicated
    ``AdminSessionLocal`` session so the worker never shares ORM state with
    request handlers; the session is always closed, even on failure.
    """
    # Imported lazily to avoid a circular import at module load time.
    from admin_app.services.tool_management_service import ToolManagementService

    session = AdminSessionLocal()
    try:
        management_service = ToolManagementService(
            settings=self.settings,
            draft_repository=ToolDraftRepository(session),
            version_repository=ToolVersionRepository(session),
            metadata_repository=ToolMetadataRepository(session),
            artifact_repository=ToolArtifactRepository(session),
            tool_generation_service=ToolGenerationService(self.settings),
        )
        result = dict(
            management_service.run_generation_pipeline(
                version_id,
                runner_staff_account_id=runner_staff_account_id,
                runner_name=runner_name,
                runner_role=runner_role,
            )
        )
        # Record which worker thread produced the payload for traceability.
        result["_worker_thread_name"] = threading.current_thread().name
        return result
    finally:
        session.close()

@ -1,6 +1,7 @@
from __future__ import annotations
import asyncio
import hashlib
import inspect
import json
import re
@ -36,13 +37,18 @@ from shared.contracts import (
ToolParameterContract,
ToolParameterType,
ToolPublicationEnvelope,
ToolRuntimePublicationManifest,
build_generated_tool_file_path,
build_generated_tool_module_name,
build_generated_tool_module_path,
get_generated_tool_publication_manifest_path,
get_generated_tools_runtime_dir,
normalize_staff_role,
role_has_permission,
)
_PARAMETER_TYPE_DESCRIPTIONS = {
ToolParameterType.STRING: "Texto livre, codigos e identificadores.",
ToolParameterType.INTEGER: "Valores inteiros para limites, anos e contagens.",
@ -108,12 +114,154 @@ class ToolManagementService:
version_repository: ToolVersionRepository | None = None,
metadata_repository: ToolMetadataRepository | None = None,
artifact_repository: ToolArtifactRepository | None = None,
tool_generation_service=None, # ToolGenerationService | None
tool_generation_worker_service=None, # ToolGenerationWorkerService | None
):
self.settings = settings
self.draft_repository = draft_repository
self.version_repository = version_repository
self.metadata_repository = metadata_repository
self.artifact_repository = artifact_repository
# Servico isolado de geracao via LLM (runtime tool_generation, separado do atendimento).
# Pode ser None para manter compatibilidade retroativa (usa stub de validacao).
self.tool_generation_service = tool_generation_service
# Worker dedicado para executar a pipeline em thread propria do admin.
self.tool_generation_worker_service = tool_generation_worker_service
def run_generation_pipeline_in_worker(
    self,
    version_id: str,
    *,
    runner_staff_account_id: int,
    runner_name: str,
    runner_role: StaffRole | str,
) -> dict:
    """Dispatch the generation pipeline to the dedicated admin worker.

    When no worker service is wired in, the pipeline runs inline
    (synchronously) for backward compatibility and a synthetic "execution"
    envelope is attached. Otherwise the version is validated up front and the
    job is handed to the worker; the returned payload then describes the
    dispatch state (queued/running/failed) rather than the final pipeline
    result — unless the worker already holds a completed result.

    Raises:
        PermissionError: when the runner's role lacks MANAGE_TOOL_DRAFTS.
        LookupError: when the version id does not exist.
        ValueError: for stale versions or versions outside (draft, failed).
        RuntimeError: when repositories or related rows are missing.
    """
    normalized_role = normalize_staff_role(runner_role)
    if not role_has_permission(normalized_role, AdminPermission.MANAGE_TOOL_DRAFTS):
        raise PermissionError(
            f"Papel '{normalized_role.value}' sem permissao administrativa '{AdminPermission.MANAGE_TOOL_DRAFTS.value}'."
        )
    if self.tool_generation_worker_service is None:
        # Compatibility path: run the pipeline inline and mimic a completed
        # dispatch so callers see a uniform "execution" envelope shape.
        payload = dict(
            self.run_generation_pipeline(
                version_id,
                runner_staff_account_id=runner_staff_account_id,
                runner_name=runner_name,
                runner_role=runner_role,
            )
        )
        payload["execution"] = {
            "mode": "inline_admin_service",
            "target": "admin_inline_generation_pipeline",
            "dispatch_state": "completed",
            "worker_max_workers": None,
            "worker_pending_jobs": None,
            "queued_jobs_before_submit": 0,
            "submitted_at": None,
            "started_at": None,
            "completed_at": None,
            "elapsed_ms": None,
            "worker_thread_name": None,
            "poll_after_ms": None,
            "last_error": None,
        }
        return payload
    # NOTE(review): the `is not None` guard below is always true here — the
    # None case returned above — so only the repository checks are effective.
    if self.tool_generation_worker_service is not None and (
        self.draft_repository is None
        or self.version_repository is None
        or self.metadata_repository is None
    ):
        raise RuntimeError(
            "Pipeline de geracao ainda nao esta completamente conectado ao armazenamento administrativo."
        )
    normalized_version_id = str(version_id or "").strip().lower()
    version = self.version_repository.get_by_version_id(normalized_version_id)
    if version is None:
        raise LookupError("Versao administrativa nao encontrada.")
    # Only the newest version of a tool may move through the pipeline;
    # list_versions is assumed newest-first — TODO confirm ordering contract.
    latest_versions_for_tool = self.version_repository.list_versions(tool_name=version.tool_name)
    if latest_versions_for_tool and latest_versions_for_tool[0].version_id != version.version_id:
        raise ValueError(
            "Somente a versao mais recente da tool pode seguir pelo pipeline de geracao."
        )
    if version.status not in {ToolLifecycleStatus.DRAFT, ToolLifecycleStatus.FAILED}:
        raise ValueError(
            f"A pipeline de geracao exige status em (draft, failed), mas a versao esta em '{version.status.value}'."
        )
    draft = self.draft_repository.get_by_tool_name(version.tool_name)
    if draft is None:
        raise RuntimeError("Draft raiz da tool nao encontrado para a pipeline de geracao.")
    metadata = self.metadata_repository.get_by_tool_version_id(version.id)
    if metadata is None:
        raise RuntimeError("Metadados persistidos da versao nao encontrados para a pipeline de geracao.")
    # Hand the job to the dedicated worker; this returns immediately with a
    # dispatch snapshot instead of blocking on the LLM pipeline.
    execution = self.tool_generation_worker_service.dispatch_generation_pipeline(
        version_id=version_id,
        runner_staff_account_id=runner_staff_account_id,
        runner_name=runner_name,
        runner_role=runner_role,
    )
    result_payload = execution.pop("result_payload", None)
    if execution.get("dispatch_state") == "completed" and isinstance(result_payload, dict):
        # The worker already finished this job (e.g. a re-dispatch): return
        # the real pipeline result with the execution envelope attached.
        payload = dict(result_payload)
        payload["execution"] = execution
        return payload
    pipeline_snapshot = self._build_pipeline_snapshot(version.status)
    dispatch_state = str(execution.get("dispatch_state") or "queued").strip().lower()
    # Human-facing message/next-steps vary with the async dispatch state.
    message_by_dispatch_state = {
        "queued": "Pipeline enfileirada no worker dedicado do admin. A request foi liberada sem esperar a geracao terminar.",
        "running": "Pipeline em execucao no worker dedicado do admin. A request foi liberada sem bloquear o runtime administrativo.",
        "failed": "O worker dedicado falhou antes de concluir a pipeline. Revise o erro registrado e tente novamente.",
    }
    next_steps_by_dispatch_state = {
        "queued": [
            "Acompanhe a fila de revisao para ver quando a versao sair de draft e entrar em generated.",
            "Enquanto a job estiver na fila dedicada, o atendimento continua desacoplado da carga de geracao.",
        ],
        "running": [
            "A pipeline ja esta sendo executada em background no worker dedicado do admin.",
            "Atualize a leitura da fila ou do detalhe para acompanhar a transicao da versao quando o worker concluir.",
        ],
        "failed": [
            "Revise o erro do worker dedicado e reenvie a pipeline quando a causa for corrigida.",
            "Enquanto a versao permanecer sem uma geracao concluida, ela continua fora da revisao humana e da ativacao.",
        ],
    }
    return {
        "message": message_by_dispatch_state.get(
            dispatch_state,
            "Pipeline encaminhada para o worker dedicado do admin.",
        ),
        "version_id": version.version_id,
        "tool_name": version.tool_name,
        "version_number": version.version_number,
        "status": version.status,
        "current_step": pipeline_snapshot["current_step"],
        "steps": pipeline_snapshot["steps"],
        "queue_entry": self._serialize_review_queue_entry(version, worker_execution=execution),
        "automated_validations": [],
        "execution": execution,
        "next_steps": next_steps_by_dispatch_state.get(
            dispatch_state,
            ["Atualize a fila administrativa para acompanhar a pipeline dedicada."],
        ),
    }
def _get_generation_pipeline_worker_execution(self, version_id: str) -> dict | None:
if self.tool_generation_worker_service is None:
return None
getter = getattr(self.tool_generation_worker_service, "get_generation_pipeline_dispatch", None)
if not callable(getter):
return None
execution = getter(version_id)
if not isinstance(execution, dict):
return None
return dict(execution)
def _resolve_repository_session(self) -> Session | None:
repository_sessions = [
@ -390,6 +538,18 @@ class ToolManagementService:
automated_validation = self._extract_latest_automated_validation(version.id)
generated_source_code = str(validation_payload.get("generated_source_code") or "").strip()
worker_execution = self._get_generation_pipeline_worker_execution(version.version_id)
automated_validation_summary = automated_validation.get("summary")
if not generated_source_code and isinstance(worker_execution, dict):
dispatch_state = str(worker_execution.get("dispatch_state") or "").strip().lower()
if dispatch_state == "queued":
automated_validation_summary = "Pipeline enfileirada no worker dedicado aguardando execucao."
elif dispatch_state == "running":
automated_validation_summary = "Pipeline em execucao no worker dedicado do admin."
elif dispatch_state == "failed":
automated_validation_summary = worker_execution.get("last_error") or (
"O worker dedicado falhou antes de concluir a pipeline."
)
return {
"version_id": version.version_id,
@ -403,15 +563,20 @@ class ToolManagementService:
"business_goal": version.business_goal,
"owner_name": version.owner_display_name,
"parameters": self._serialize_parameters_for_response(metadata.parameters_json),
"queue_entry": self._serialize_review_queue_entry(version),
"queue_entry": self._serialize_review_queue_entry(version, worker_execution=worker_execution),
"automated_validations": list(validation_payload.get("automated_checks") or []),
"automated_validation_summary": automated_validation.get("summary"),
"automated_validation_summary": automated_validation_summary,
"generated_module": build_generated_tool_module_name(version.tool_name),
"generated_callable": GENERATED_TOOL_ENTRYPOINT,
"generated_source_code": generated_source_code,
"execution": worker_execution,
"human_gate": self._build_human_review_gate(version),
"decision_history": self._list_governance_history_entries(version.id),
"next_steps": self._build_review_detail_next_steps(version, bool(generated_source_code)),
"next_steps": self._build_review_detail_next_steps(
version,
bool(generated_source_code),
worker_execution=worker_execution,
),
}
def build_publications_payload(self) -> dict:
@ -483,11 +648,95 @@ class ToolManagementService:
if metadata is None:
raise RuntimeError("Metadados persistidos da versao nao encontrados para a pipeline de geracao.")
# ---- Fase 7: Geracao de codigo via LLM isolado do runtime de atendimento ----
# O tool_generation_service e None em modo de compatibilidade (usa stub).
# Quando presente, gera codigo real usando o modelo do runtime de geracao.
llm_generated_source: str | None = None
llm_generation_model: str | None = None
llm_generation_issues: list[str] = []
if self.tool_generation_service is not None:
preferred_model = str(version.generation_model or "").strip() or None
generation_result = asyncio.run(
self.tool_generation_service.generate_tool_source(
tool_name=version.tool_name,
display_name=metadata.display_name,
domain=metadata.domain,
description=metadata.description,
business_goal=version.business_goal,
parameters=list(metadata.parameters_json or []),
preferred_model=preferred_model,
)
)
llm_generated_source = generation_result.get("generated_source_code")
llm_generation_model = generation_result.get("generation_model_used")
llm_generation_issues = list(generation_result.get("issues") or [])
# ---- fim Fase 7 ----
repository_session = self._resolve_repository_session()
atomic_write_options = {"commit": False} if repository_session is not None else {}
artifact_commit = False if repository_session is not None else None
automated_validation_result: dict | None = None
# Se o LLM falhou (issues presentes e sem codigo), marca FAILED imediatamente.
if llm_generation_issues and not llm_generated_source:
try:
self.version_repository.update_status(
version,
status=ToolLifecycleStatus.FAILED,
**atomic_write_options,
)
self.metadata_repository.update_status(
metadata,
status=ToolLifecycleStatus.FAILED,
**atomic_write_options,
)
self.draft_repository.update_status(
draft,
status=ToolLifecycleStatus.FAILED,
**atomic_write_options,
)
self._persist_generation_pipeline_artifact(
draft=draft,
version=version,
actor_staff_account_id=runner_staff_account_id,
actor_name=runner_name,
actor_role=normalized_role,
llm_generated_source=None,
llm_generation_model=llm_generation_model,
llm_generation_issues=llm_generation_issues,
commit=artifact_commit,
)
if repository_session is not None:
self._commit_repository_session(
repository_session,
draft=draft,
version=version,
)
except Exception:
if repository_session is not None:
repository_session.rollback()
raise
pipeline_snapshot = self._build_pipeline_snapshot(ToolLifecycleStatus.FAILED)
return {
"message": (
"A geraÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢â¬Â ÃƒÂ¢Ã¢â€šÂ¬Ã¢â€žÂ¢ÃƒÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã ÃÆÃ¢Ã¢ââ¬Å¡Ã¬Ã¢ââ¬Å¾Ã¢ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢Ã¢ââ¬Å¡Ã¬Ã…Ã¡ÃÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã…¡ÃÆÃ¢â¬Å¡Ãƒâ€šÃ§ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢â¬Â ÃƒÂ¢Ã¢â€šÂ¬Ã¢â€žÂ¢ÃƒÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã ÃÆÃ¢Ã¢ââ¬Å¡Ã¬Ã¢ââ¬Å¾Ã¢ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢Ã¢ââ¬Å¡Ã¬Ã…Ã¡ÃÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã…¡ÃÆÃ¢â¬Å¡Ãƒâ€šÃ£o via LLM falhou antes das validaÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢â¬Â ÃƒÂ¢Ã¢â€šÂ¬Ã¢â€žÂ¢ÃƒÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã ÃÆÃ¢Ã¢ââ¬Å¡Ã¬Ã¢ââ¬Å¾Ã¢ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢Ã¢ââ¬Å¡Ã¬Ã…Ã¡ÃÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã…¡ÃÆÃ¢â¬Å¡Ãƒâ€šÃ§ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢â¬Â ÃƒÂ¢Ã¢â€šÂ¬Ã¢â€žÂ¢ÃƒÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã ÃÆÃ¢Ã¢ââ¬Å¡Ã¬Ã¢ââ¬Å¾Ã¢ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢Ã¢ââ¬Å¡Ã¬Ã…Ã¡ÃÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã…¡ÃÆÃ¢â¬Å¡Ãƒâ€šÃµes automÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢â¬Â ÃƒÂ¢Ã¢â€šÂ¬Ã¢â€žÂ¢ÃƒÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã ÃÆÃ¢Ã¢ââ¬Å¡Ã¬Ã¢ââ¬Å¾Ã¢ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢Ã¢ââ¬Å¡Ã¬Ã…Ã¡ÃÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã…¡ÃÆÃ¢â¬Å¡Ãƒâ€šÃ¡ticas. "
"Verifique os issues de geraÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢â¬Â ÃƒÂ¢Ã¢â€šÂ¬Ã¢â€žÂ¢ÃƒÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã ÃÆÃ¢Ã¢ââ¬Å¡Ã¬Ã¢ââ¬Å¾Ã¢ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢Ã¢ââ¬Å¡Ã¬Ã…Ã¡ÃÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã…¡ÃÆÃ¢â¬Å¡Ãƒâ€šÃ§ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢â¬Â ÃƒÂ¢Ã¢â€šÂ¬Ã¢â€žÂ¢ÃƒÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã ÃÆÃ¢Ã¢ââ¬Å¡Ã¬Ã¢ââ¬Å¾Ã¢ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢Ã¢ââ¬Å¡Ã¬Ã…Ã¡ÃÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã…¡ÃÆÃ¢â¬Å¡Ãƒâ€šÃ£o e execute a pipeline novamente."
),
"version_id": version.version_id,
"tool_name": version.tool_name,
"version_number": version.version_number,
"status": ToolLifecycleStatus.FAILED,
"current_step": pipeline_snapshot["current_step"],
"steps": pipeline_snapshot["steps"],
"queue_entry": self._serialize_review_queue_entry(version),
"automated_validations": [],
"next_steps": [
"Verifique o modelo de geraÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢â¬Â ÃƒÂ¢Ã¢â€šÂ¬Ã¢â€žÂ¢ÃƒÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã ÃÆÃ¢Ã¢ââ¬Å¡Ã¬Ã¢ââ¬Å¾Ã¢ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢Ã¢ââ¬Å¡Ã¬Ã…Ã¡ÃÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã…¡ÃÆÃ¢â¬Å¡Ãƒâ€šÃ§ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢â¬Â ÃƒÂ¢Ã¢â€šÂ¬Ã¢â€žÂ¢ÃƒÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã ÃÆÃ¢Ã¢ââ¬Å¡Ã¬Ã¢ââ¬Å¾Ã¢ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢Ã¢ââ¬Å¡Ã¬Ã…Ã¡ÃÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã…¡ÃÆÃ¢â¬Å¡Ãƒâ€šÃ£o configurado e se o Vertex AI estÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢â¬Â ÃƒÂ¢Ã¢â€šÂ¬Ã¢â€žÂ¢ÃƒÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã ÃÆÃ¢Ã¢ââ¬Å¡Ã¬Ã¢ââ¬Å¾Ã¢ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢Ã¢ââ¬Å¡Ã¬Ã…Ã¡ÃÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã…¡ÃÆÃ¢â¬Å¡Ãƒâ€šÃ¡ acessÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢â¬Â ÃƒÂ¢Ã¢â€šÂ¬Ã¢â€žÂ¢ÃƒÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã ÃÆÃ¢Ã¢ââ¬Å¡Ã¬Ã¢ââ¬Å¾Ã¢ÃÆÃ†â€™Ãƒâ€ Ã¢â¬â„¢ÃƒÆÃ¢Ã¢ââ¬Å¡Ã¬Ã…Ã¡ÃÆÃ†â€™ÃƒÂ¢Ã¢â€šÂ¬Ã…¡ÃÆÃ¢â¬Å¡Ãƒâ€šÃ­vel.",
*[f"Issue: {issue}" for issue in llm_generation_issues],
],
}
try:
self._persist_generation_pipeline_artifact(
draft=draft,
@ -495,6 +744,9 @@ class ToolManagementService:
actor_staff_account_id=runner_staff_account_id,
actor_name=runner_name,
actor_role=normalized_role,
llm_generated_source=llm_generated_source,
llm_generation_model=llm_generation_model,
llm_generation_issues=llm_generation_issues,
commit=artifact_commit,
)
automated_validation_result = self._execute_automated_contract_validation(
@ -503,6 +755,7 @@ class ToolManagementService:
metadata=metadata,
actor_staff_account_id=runner_staff_account_id,
actor_name=runner_name,
llm_generated_source=llm_generated_source,
commit=artifact_commit,
)
pipeline_status = (
@ -820,6 +1073,7 @@ class ToolManagementService:
repository_session.rollback()
raise
self._synchronize_product_runtime_publication_snapshot()
return {
"message": (
"Rollback executado com sucesso e a versao arquivada voltou ao catalogo governado como ativa."
@ -944,6 +1198,8 @@ class ToolManagementService:
repository_session.rollback()
raise
if target_status in {ToolLifecycleStatus.ACTIVE, ToolLifecycleStatus.ARCHIVED}:
self._synchronize_product_runtime_publication_snapshot()
queue_entry = None
publication = None
if target_status == ToolLifecycleStatus.ACTIVE:
@ -975,6 +1231,7 @@ class ToolManagementService:
required_parameter_count = sum(1 for parameter in normalized["parameters"] if parameter["required"])
summary = self._build_draft_summary(normalized)
stored_parameters = self._serialize_parameters_for_storage(normalized["parameters"])
generation_model = normalized["generation_model"]
submission_policy = self._build_submission_policy(submitter_role=owner_role)
if self.draft_repository is None:
@ -998,6 +1255,7 @@ class ToolManagementService:
"version_count": version_count,
"parameter_count": len(normalized["parameters"]),
"required_parameter_count": required_parameter_count,
"generation_model": generation_model,
"requires_director_approval": True,
"owner_name": owner_name,
"parameters": normalized["parameters"],
@ -1035,6 +1293,7 @@ class ToolManagementService:
required_parameter_count=required_parameter_count,
current_version_number=next_version_number,
version_count=next_version_count,
generation_model=generation_model,
owner_staff_account_id=owner_staff_account_id,
owner_display_name=owner_display_name,
requires_director_approval=True,
@ -1052,6 +1311,7 @@ class ToolManagementService:
required_parameter_count=required_parameter_count,
current_version_number=next_version_number,
version_count=next_version_count,
generation_model=generation_model,
owner_staff_account_id=owner_staff_account_id,
owner_display_name=owner_display_name,
requires_director_approval=True,
@ -1069,6 +1329,7 @@ class ToolManagementService:
business_goal=normalized["business_goal"],
parameters_json=stored_parameters,
required_parameter_count=required_parameter_count,
generation_model=generation_model,
owner_staff_account_id=owner_staff_account_id,
owner_display_name=owner_display_name,
status=ToolLifecycleStatus.DRAFT,
@ -1100,6 +1361,7 @@ class ToolManagementService:
warnings=warnings,
stored_parameters=stored_parameters,
required_parameter_count=required_parameter_count,
generation_model=generation_model,
owner_staff_account_id=owner_staff_account_id,
owner_name=owner_display_name,
commit=artifact_commit,
@ -1140,6 +1402,7 @@ class ToolManagementService:
warnings = self._build_intake_warnings(normalized)
required_parameter_count = sum(1 for parameter in normalized["parameters"] if parameter["required"])
summary = self._build_draft_summary(normalized)
generation_model = normalized["generation_model"]
submission_policy = self._build_submission_policy(submitter_role=owner_role)
existing_draft = None
if self.draft_repository is not None:
@ -1163,6 +1426,7 @@ class ToolManagementService:
"version_count": version_count,
"parameter_count": len(normalized["parameters"]),
"required_parameter_count": required_parameter_count,
"generation_model": generation_model,
"requires_director_approval": True,
"owner_name": owner_name,
"parameters": normalized["parameters"],
@ -1327,6 +1591,7 @@ class ToolManagementService:
warnings: list[str],
stored_parameters: list[dict],
required_parameter_count: int,
generation_model: str | None = None,
owner_staff_account_id: int,
owner_name: str,
commit: bool | None = None,
@ -1341,6 +1606,7 @@ class ToolManagementService:
version=version,
summary=summary,
stored_parameters=stored_parameters,
generation_model=generation_model,
)
validation_payload = self._build_validation_artifact_payload(
draft=draft,
@ -1386,6 +1652,7 @@ class ToolManagementService:
version: ToolVersion,
summary: str,
stored_parameters: list[dict],
generation_model: str | None = None,
) -> dict:
return {
"source": "admin_draft_intake",
@ -1398,6 +1665,7 @@ class ToolManagementService:
"business_goal": draft.business_goal,
"description": draft.description,
"summary": summary,
"generation_model": generation_model,
"parameters": list(stored_parameters),
"requires_director_approval": draft.requires_director_approval,
"target_package": GENERATED_TOOLS_PACKAGE,
@ -1444,6 +1712,7 @@ class ToolManagementService:
metadata: ToolMetadata,
actor_staff_account_id: int,
actor_name: str,
llm_generated_source: str | None = None,
commit: bool | None = None,
) -> dict:
previous_validation_payload = {}
@ -1467,12 +1736,14 @@ class ToolManagementService:
version=version,
metadata=metadata,
signature_schema_blueprint=signature_schema_blueprint,
llm_generated_source=llm_generated_source,
)
smoke_test_result = self._run_generated_tool_minimal_smoke_tests(
version=version,
metadata=metadata,
signature_schema_blueprint=signature_schema_blueprint,
import_loading_result=import_loading_result,
llm_generated_source=llm_generated_source,
)
automated_checks = [
{
@ -1697,6 +1968,7 @@ class ToolManagementService:
version: ToolVersion,
metadata: ToolMetadata,
signature_schema_blueprint: dict,
llm_generated_source: str | None = None,
) -> dict:
module_name = build_generated_tool_module_name(version.tool_name)
module_path = build_generated_tool_module_path(version.tool_name)
@ -1705,6 +1977,7 @@ class ToolManagementService:
version=version,
metadata=metadata,
signature_schema_blueprint=signature_schema_blueprint,
pregenerated_source=llm_generated_source,
)
issues: list[str] = []
handler = None
@ -1768,6 +2041,7 @@ class ToolManagementService:
version: ToolVersion,
metadata: ToolMetadata,
signature_schema_blueprint: dict,
llm_generated_source: str | None = None,
) -> dict:
if signature_schema_blueprint["issues"]:
return {
@ -1781,6 +2055,7 @@ class ToolManagementService:
version=version,
metadata=metadata,
signature_schema_blueprint=signature_schema_blueprint,
pregenerated_source=llm_generated_source,
),
"issues": [
"generated import/loading validation skipped because the signature/schema blueprint is invalid."
@ -1791,6 +2066,7 @@ class ToolManagementService:
version=version,
metadata=metadata,
signature_schema_blueprint=signature_schema_blueprint,
llm_generated_source=llm_generated_source,
)
issues = list(load_result["issues"])
handler = load_result["handler"]
@ -1832,6 +2108,7 @@ class ToolManagementService:
metadata: ToolMetadata,
signature_schema_blueprint: dict,
import_loading_result: dict,
llm_generated_source: str | None = None,
) -> dict:
if signature_schema_blueprint["issues"]:
return {
@ -1864,6 +2141,7 @@ class ToolManagementService:
version=version,
metadata=metadata,
signature_schema_blueprint=signature_schema_blueprint,
llm_generated_source=llm_generated_source,
)
issues = list(load_result["issues"])
handler = load_result["handler"]
@ -1958,7 +2236,14 @@ class ToolManagementService:
version: ToolVersion,
metadata: ToolMetadata,
signature_schema_blueprint: dict,
pregenerated_source: str | None = None,
) -> str:
# Fase 7: quando o LLM gerou codigo real, usa diretamente.
# O smoke test e o import loading validarao o codigo LLM com o mesmo rigor do stub.
if pregenerated_source:
return pregenerated_source
# Fallback: stub de validacao estrutural (sem tool_generation_service ou se gerou None).
serialized_parameters = self._serialize_parameters_for_response(metadata.parameters_json)
if serialized_parameters:
signature_tokens = []
@ -2067,6 +2352,102 @@ class ToolManagementService:
emitted_at=datetime.now(UTC),
)
def _get_version_by_tool_version_id(self, tool_version_id: int) -> ToolVersion | None:
if self.version_repository is None:
return None
for version in self.version_repository.list_versions():
if version.id == tool_version_id:
return version
return None
def _get_generated_source_code_for_version(self, tool_version_id: int) -> str:
    """Load the generated source stored in the version's validation artifact.

    The generated code travels inside the VALIDATION_REPORT artifact payload
    under ``generated_source_code``; a missing key or blank value is treated
    as "no code" and rejected.

    Raises:
        RuntimeError: if the artifact repository is absent, the validation
            report artifact does not exist, or it holds no generated code.
    """
    if self.artifact_repository is None:
        raise RuntimeError(
            "Nao foi possivel sincronizar o runtime do product sem os artefatos de validacao da tool."
        )
    validation_artifact = self.artifact_repository.get_by_tool_version_and_kind(
        tool_version_id,
        ToolArtifactKind.VALIDATION_REPORT,
    )
    if validation_artifact is None:
        raise RuntimeError("Artefato de validacao nao encontrado para sincronizar a tool publicada no product.")
    # `payload_json or {}` shields against a NULL payload column.
    generated_source_code = str((validation_artifact.payload_json or {}).get("generated_source_code") or "").strip()
    if not generated_source_code:
        raise RuntimeError("O codigo gerado da tool publicada nao foi encontrado para sincronizacao do runtime.")
    return generated_source_code
def _build_published_runtime_envelope(
    self,
    *,
    version: ToolVersion,
    metadata: ToolMetadata,
    generated_source_code: str,
) -> ToolPublicationEnvelope:
    """Derive the ACTIVE publication envelope for an already-published tool.

    Starts from the generated-state envelope and overrides the published
    fields: lifecycle status, a SHA-256 checksum of the shipped source, and
    the publication timestamp/author taken from the metadata row.
    """
    generated_envelope = self._build_generated_publication_envelope(version=version, metadata=metadata)
    published_tool = generated_envelope.published_tool.model_copy(
        update={
            "status": ToolLifecycleStatus.ACTIVE,
            # Checksum over the exact bytes written to the runtime snapshot.
            "checksum": hashlib.sha256(generated_source_code.encode("utf-8")).hexdigest(),
            # NOTE(review): falls back to created_at for rows never updated —
            # confirm this is an acceptable proxy for the publication time.
            "published_at": metadata.updated_at or metadata.created_at,
            "published_by": metadata.author_display_name,
        }
    )
    return generated_envelope.model_copy(
        update={
            "published_tool": published_tool,
            "emitted_at": datetime.now(UTC),
        }
    )
@staticmethod
def _write_runtime_snapshot_file(path, content: str) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
temp_path = path.with_suffix(f"{path.suffix}.tmp")
temp_path.write_text(content, encoding="utf-8")
temp_path.replace(path)
def _synchronize_product_runtime_publication_snapshot(self) -> None:
    """Mirror the published tool catalog into the product runtime directory.

    Rebuilds the generated-tools package on disk — one module file per
    published tool plus a JSON publication manifest — so the product service
    can load admin-governed tools without touching the admin database.

    Raises:
        RuntimeError: when a published metadata row has no matching version,
            or (via the helpers) when a version's generated code is missing.
    """
    runtime_dir = get_generated_tools_runtime_dir()
    runtime_dir.mkdir(parents=True, exist_ok=True)
    # Ensure the directory is an importable package; write the marker once.
    init_file = runtime_dir / "__init__.py"
    if not init_file.exists():
        # NOTE(review): the trailing "\\n" in this literal writes a literal
        # backslash-n, not a newline — confirm whether "\n" was intended.
        init_file.write_text(
            "\"\"\"Isolated runtime package for admin-governed generated tools.\"\"\"\\n",
            encoding="utf-8",
        )
    active_metadata_entries = self._list_latest_metadata_entries(statuses=_PUBLISHED_TOOL_STATUSES)
    publication_envelopes: list[ToolPublicationEnvelope] = []
    for metadata in active_metadata_entries:
        version = self._get_version_by_tool_version_id(metadata.tool_version_id)
        if version is None:
            raise RuntimeError(
                f"Versao publicada nao encontrada para sincronizar o runtime da tool '{metadata.tool_name}'."
            )
        generated_source_code = self._get_generated_source_code_for_version(version.id)
        # One module file per tool, written atomically via the snapshot helper.
        self._write_runtime_snapshot_file(
            build_generated_tool_file_path(metadata.tool_name),
            generated_source_code,
        )
        publication_envelopes.append(
            self._build_published_runtime_envelope(
                version=version,
                metadata=metadata,
                generated_source_code=generated_source_code,
            )
        )
    # The manifest is written last, so it only references module files that
    # are already on disk.
    manifest = ToolRuntimePublicationManifest(
        source_service=ServiceName.ADMIN,
        target_service=ServiceName.PRODUCT,
        emitted_at=datetime.now(UTC),
        publications=tuple(publication_envelopes),
    )
    self._write_runtime_snapshot_file(
        get_generated_tool_publication_manifest_path(),
        json.dumps(manifest.model_dump(mode="json"), ensure_ascii=True, indent=2, sort_keys=True),
    )
@staticmethod
def _format_contract_validation_errors(error: ValidationError | ValueError) -> list[str]:
if isinstance(error, ValidationError):
@ -2084,6 +2465,9 @@ class ToolManagementService:
actor_staff_account_id: int,
actor_name: str,
actor_role: StaffRole,
llm_generated_source: str | None = None,
llm_generation_model: str | None = None,
llm_generation_issues: list[str] | None = None,
commit: bool | None = None,
) -> None:
if self.artifact_repository is None:
@ -2098,11 +2482,15 @@ class ToolManagementService:
)
generation_payload.update(
{
"source": "admin_generation_pipeline",
"source": "admin_generation_pipeline_llm" if llm_generated_source else "admin_generation_pipeline",
"pipeline_status": "completed",
"triggered_by": actor_name,
"triggered_by_role": actor_role.value,
"generated_at": datetime.now(UTC).isoformat(),
# ---- Fase 7: rastreabilidade da geração via LLM ----
"generation_model_used": llm_generation_model,
"generation_issues": list(llm_generation_issues or []),
"generation_source": "llm" if llm_generated_source else "stub",
}
)
self.artifact_repository.upsert_version_artifact(
@ -2234,7 +2622,12 @@ class ToolManagementService:
latest_by_tool_name[normalized_tool_name] = version
return [version for version in latest_by_tool_name.values() if version is not None]
def _serialize_review_queue_entry(self, version: ToolVersion) -> dict:
def _serialize_review_queue_entry(
self,
version: ToolVersion,
*,
worker_execution: dict | None = None,
) -> dict:
metadata = (
self.metadata_repository.get_by_tool_version_id(version.id)
if self.metadata_repository is not None
@ -2242,6 +2635,34 @@ class ToolManagementService:
)
display_name = metadata.display_name if metadata is not None else version.tool_name.replace("_", " ").title()
automated_validation = self._extract_latest_automated_validation(version.id)
gate = self._build_review_gate(version.status)
automated_validation_status = automated_validation.get("status")
automated_validation_summary = automated_validation.get("summary")
effective_worker_execution = worker_execution
if effective_worker_execution is None and version.status in {
ToolLifecycleStatus.DRAFT,
ToolLifecycleStatus.FAILED,
}:
effective_worker_execution = self._get_generation_pipeline_worker_execution(version.version_id)
if isinstance(effective_worker_execution, dict):
dispatch_state = str(effective_worker_execution.get("dispatch_state") or "").strip().lower()
if dispatch_state == "queued":
gate = "generation_pipeline_queued"
automated_validation_status = "pending"
automated_validation_summary = "Pipeline enfileirada no worker dedicado aguardando execucao."
elif dispatch_state == "running":
gate = "generation_pipeline_running"
automated_validation_status = "running"
automated_validation_summary = "Pipeline em execucao no worker dedicado do admin."
elif dispatch_state == "failed":
gate = "generation_worker_failed"
automated_validation_status = "failed"
automated_validation_summary = effective_worker_execution.get("last_error") or (
"O worker dedicado falhou antes de concluir a pipeline."
)
return {
"entry_id": version.version_id,
"version_id": version.version_id,
@ -2249,11 +2670,11 @@ class ToolManagementService:
"tool_name": version.tool_name,
"display_name": display_name,
"status": version.status,
"gate": self._build_review_gate(version.status),
"gate": gate,
"summary": version.summary,
"owner_name": version.owner_display_name,
"automated_validation_status": automated_validation.get("status"),
"automated_validation_summary": automated_validation.get("summary"),
"automated_validation_status": automated_validation_status,
"automated_validation_summary": automated_validation_summary,
"queued_at": version.updated_at or version.created_at,
}
@ -2358,48 +2779,72 @@ class ToolManagementService:
self,
version: ToolVersion,
generated_source_available: bool,
*,
worker_execution: dict | None = None,
) -> list[str]:
status = version.status
next_steps_by_status = {
ToolLifecycleStatus.DRAFT: [
"Execute a pipeline de geracao para produzir o modulo governado antes da revisao humana.",
"Enquanto a versao estiver em draft, ela permanece fora da aprovacao e da ativacao.",
],
ToolLifecycleStatus.GENERATED: [
"Analise o codigo completo gerado, confirme a leitura manual e registre a revisao da diretoria.",
"Somente depois da revisao humana a versao pode seguir para aprovacao formal.",
],
ToolLifecycleStatus.VALIDATED: [
"Registre o parecer final de aprovacao da diretoria antes da publicacao.",
"A ativacao continua bloqueada ate existir aprovacao humana explicita.",
],
ToolLifecycleStatus.APPROVED: [
"A versao ja foi aprovada pela diretoria e agora pode seguir para publicacao controlada.",
"A ativacao vai validar novamente a trilha de revisao e aprovacao humana antes de entrar no catalogo.",
],
ToolLifecycleStatus.ACTIVE: [
"A versao esta ativa no catalogo governado e pode ser desativada com parecer explicito da diretoria.",
"Quando houver uma versao arquivada anterior, o rollback controlado pode restaurar rapidamente a publicacao anterior.",
],
ToolLifecycleStatus.ARCHIVED: [
"Esta versao foi retirada do catalogo ativo e permanece arquivada para historico e auditoria.",
"A diretoria pode restaurar uma versao arquivada por rollback controlado a partir da publicacao ativa correspondente.",
],
ToolLifecycleStatus.FAILED: [
"Corrija os bloqueios da pipeline e execute uma nova geracao antes de voltar para a revisao humana.",
"Enquanto a versao estiver em failed, a aprovacao e a ativacao permanecem indisponiveis.",
],
}
next_steps = list(next_steps_by_status.get(status, ["Acompanhe a governanca da versao pela trilha administrativa."]))
if status == ToolLifecycleStatus.ACTIVE:
rollback_candidate = self._find_latest_archived_version(
tool_name=version.tool_name,
excluding_version_id=version.id,
dispatch_state = ""
if isinstance(worker_execution, dict):
dispatch_state = str(worker_execution.get("dispatch_state") or "").strip().lower()
if status in {ToolLifecycleStatus.DRAFT, ToolLifecycleStatus.FAILED} and dispatch_state == "queued":
next_steps = [
"A pipeline esta enfileirada no worker dedicado do admin e ainda nao entrou na etapa de validacao.",
"Atualize este detalhe apos a execucao para acompanhar quando a versao sair de draft e entrar em generated.",
]
elif status in {ToolLifecycleStatus.DRAFT, ToolLifecycleStatus.FAILED} and dispatch_state == "running":
next_steps = [
"A pipeline esta sendo executada em background no worker dedicado do admin.",
"Atualize este detalhe quando a geracao concluir para revisar o codigo e as validacoes automaticas.",
]
elif status in {ToolLifecycleStatus.DRAFT, ToolLifecycleStatus.FAILED} and dispatch_state == "failed":
next_steps = [
"O worker dedicado falhou antes de concluir a pipeline. Corrija a causa e reenvie a geracao.",
"Enquanto a execucao dedicada nao concluir com sucesso, a versao permanece fora da revisao humana e da ativacao.",
]
else:
next_steps_by_status = {
ToolLifecycleStatus.DRAFT: [
"Execute a pipeline de geracao para produzir o modulo governado antes da revisao humana.",
"Enquanto a versao estiver em draft, ela permanece fora da aprovacao e da ativacao.",
],
ToolLifecycleStatus.GENERATED: [
"Analise o codigo completo gerado, confirme a leitura manual e registre a revisao da diretoria.",
"Somente depois da revisao humana a versao pode seguir para aprovacao formal.",
],
ToolLifecycleStatus.VALIDATED: [
"Registre o parecer final de aprovacao da diretoria antes da publicacao.",
"A ativacao continua bloqueada ate existir aprovacao humana explicita.",
],
ToolLifecycleStatus.APPROVED: [
"A versao ja foi aprovada pela diretoria e agora pode seguir para publicacao controlada.",
"A ativacao vai validar novamente a trilha de revisao e aprovacao humana antes de entrar no catalogo.",
],
ToolLifecycleStatus.ACTIVE: [
"A versao esta ativa no catalogo governado e pode ser desativada com parecer explicito da diretoria.",
"Quando houver uma versao arquivada anterior, o rollback controlado pode restaurar rapidamente a publicacao anterior.",
],
ToolLifecycleStatus.ARCHIVED: [
"Esta versao foi retirada do catalogo ativo e permanece arquivada para historico e auditoria.",
"A diretoria pode restaurar uma versao arquivada por rollback controlado a partir da publicacao ativa correspondente.",
],
ToolLifecycleStatus.FAILED: [
"Corrija os bloqueios da pipeline e execute uma nova geracao antes de voltar para a revisao humana.",
"Enquanto a versao estiver em failed, a aprovacao e a ativacao permanecem indisponiveis.",
],
}
next_steps = list(
next_steps_by_status.get(status, ["Acompanhe a governanca da versao pela trilha administrativa."])
)
if rollback_candidate is not None:
next_steps.append(
f"Ha uma versao arquivada disponivel para rollback: v{rollback_candidate.version_number}."
if status == ToolLifecycleStatus.ACTIVE:
rollback_candidate = self._find_latest_archived_version(
tool_name=version.tool_name,
excluding_version_id=version.id,
)
if rollback_candidate is not None:
next_steps.append(
f"Ha uma versao arquivada disponivel para rollback: v{rollback_candidate.version_number}."
)
if not generated_source_available:
next_steps.append("O codigo completo aparece aqui assim que a pipeline gerar e registrar a funcao governada.")
return next_steps
@ -2575,6 +3020,7 @@ class ToolManagementService:
"version_count": draft.version_count,
"parameter_count": len(parameters),
"required_parameter_count": draft.required_parameter_count,
"generation_model": version.generation_model if version is not None else draft.generation_model,
"requires_director_approval": draft.requires_director_approval,
"owner_name": draft.owner_display_name,
"parameters": parameters,
@ -2698,6 +3144,9 @@ class ToolManagementService:
"domain": domain,
"description": description,
"business_goal": business_goal,
"generation_model": (
str(payload.get("generation_model") or "").strip() or None
),
"parameters": parameters,
}

@ -10,6 +10,16 @@ class Settings(BaseSettings):
google_project_id: str
google_location: str = "us-central1"
# Runtime de atendimento do product. Mantido separado do runtime de geração
# de código do admin_app, que usa AdminSettings próprios.
atendimento_model_name: str | None = None
atendimento_bundle_model_name: str | None = None
atendimento_temperature: float = 0
atendimento_max_output_tokens: int = 768
# Aliases legados mantidos por compatibilidade enquanto o runtime de
# atendimento migra para o perfil explícito de atendimento.
vertex_model_name: str = "gemini-2.5-pro"
vertex_bundle_model_name: str = "gemini-2.5-pro"
@ -31,10 +41,10 @@ class Settings(BaseSettings):
mock_seed_enabled: bool = True
auto_seed_tools: bool = True
auto_seed_mock: bool = True
environment: str = "production"
debug: bool = False
# Cloud SQL (legacy Postgres var kept only for backward compatibility in deploy scripts)
cloud_sql_connection_name: str | None = None
@ -78,10 +88,60 @@ class Settings(BaseSettings):
@field_validator("environment", "conversation_state_backend", mode="before")
@classmethod
def normalize_text_settings(cls, value):
def normalize_runtime_text_settings(cls, value):
if isinstance(value, str):
return value.strip().lower()
return value
@field_validator("atendimento_model_name", "atendimento_bundle_model_name", mode="before")
@classmethod
def normalize_optional_model_names(cls, value):
    """Trim optional model names; collapse blank strings to None."""
    if not isinstance(value, str):
        return value
    return value.strip() or None
@field_validator("vertex_model_name", "vertex_bundle_model_name", mode="before")
@classmethod
def normalize_required_model_names(cls, value):
    """Strip surrounding whitespace from the legacy model-name settings."""
    return value.strip() if isinstance(value, str) else value
@field_validator("atendimento_temperature")
@classmethod
def validate_atendimento_temperature(cls, value: float) -> float:
    """Reject sampling temperatures outside the supported range."""
    # Keep the original two-sided comparison (not a chained form) so the
    # behavior for exotic float inputs is unchanged.
    out_of_range = value < 0 or value > 2
    if out_of_range:
        raise ValueError("atendimento_temperature must be between 0 and 2")
    return value
@field_validator("atendimento_max_output_tokens")
@classmethod
def validate_atendimento_max_output_tokens(cls, value: int) -> int:
    """Enforce the minimum response-token budget for the atendimento runtime."""
    minimum_tokens = 128
    if value < minimum_tokens:
        raise ValueError("atendimento_max_output_tokens must be >= 128")
    return value
def resolve_atendimento_model_name(self) -> str:
    """Return the atendimento model name, preferring the explicit setting.

    Falls back to the legacy ``vertex_model_name`` alias; returns an empty
    string when neither setting is configured.
    """
    for candidate in (self.atendimento_model_name, self.vertex_model_name):
        resolved = str(candidate or "").strip()
        if resolved:
            return resolved
    return ""
def resolve_atendimento_bundle_model_name(self) -> str:
    """Return the bundle model name with layered fallbacks.

    Priority: explicit bundle setting, then the legacy vertex bundle
    alias, then whatever ``resolve_atendimento_model_name`` resolves to.
    """
    for candidate in (self.atendimento_bundle_model_name, self.vertex_bundle_model_name):
        resolved = str(candidate or "").strip()
        if resolved:
            return resolved
    return self.resolve_atendimento_model_name()
def build_atendimento_generation_config(self) -> dict[str, int | float]:
return {
"temperature": float(self.atendimento_temperature),
"max_output_tokens": int(self.atendimento_max_output_tokens),
}
settings = Settings()
settings = Settings()

@ -1,8 +1,10 @@
"""
"""
Rotina dedicada de bootstrap de banco de dados.
Cria tabelas e executa seed inicial de forma explicita, fora do startup do app.
"""
import json
from datetime import UTC, datetime
from pathlib import Path
from sqlalchemy import inspect, text
@ -25,24 +27,40 @@ from app.db.mock_models import (
)
from app.db.mock_seed import seed_mock_data
from app.db.tool_seed import seed_tools
from shared.contracts import GENERATED_TOOLS_PACKAGE
from shared.contracts import (
ToolRuntimePublicationManifest,
get_generated_tool_publication_manifest_path,
get_generated_tools_runtime_dir,
)
_PROJECT_ROOT = Path(__file__).resolve().parents[2]
def _ensure_generated_tools_runtime_package() -> Path:
    """Create the isolated generated-tools runtime package when missing.

    Ensures the package directory, its ``__init__.py`` and an empty
    publication manifest exist so the product runtime can load published
    tools even before the first admin publication.

    Returns:
        Path: the runtime package directory.
    """
    package_dir = get_generated_tools_runtime_dir(_PROJECT_ROOT)
    package_dir.mkdir(parents=True, exist_ok=True)
    init_file = package_dir / "__init__.py"
    if not init_file.exists():
        # BUG FIX: "\\n" in the previous literal wrote a literal
        # backslash-n; write a real trailing newline instead.
        init_file.write_text(
            '"""Isolated runtime package for admin-governed generated tools."""\n',
            encoding="utf-8",
        )
    manifest_path = get_generated_tool_publication_manifest_path(_PROJECT_ROOT)
    if not manifest_path.exists():
        # Seed an empty manifest so the product-side loader has a valid
        # document to parse before any tool is published.
        manifest = ToolRuntimePublicationManifest(
            emitted_at=datetime.now(UTC),
            publications=(),
        )
        manifest_path.write_text(
            json.dumps(manifest.model_dump(mode="json"), ensure_ascii=True, indent=2, sort_keys=True),
            encoding="utf-8",
        )
    return package_dir
def _ensure_mock_schema_evolution() -> None:
inspector = inspect(mock_engine)
table_names = set(inspector.get_table_names())

@ -37,11 +37,11 @@ class LLMService:
)
LLMService._vertex_initialized = True
configured = settings.vertex_model_name.strip()
configured = settings.resolve_atendimento_model_name()
fallback_models = ["gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.0-flash-001"]
self.model_names = self._build_model_sequence(configured, *fallback_models)
self.bundle_model_names = self._build_model_sequence(
settings.vertex_bundle_model_name.strip(),
settings.resolve_atendimento_bundle_model_name(),
*self.model_names,
)
@ -304,7 +304,7 @@ class LLMService:
)
if last_error:
raise RuntimeError(
f"Nenhum modelo Vertex disponivel. Verifique VERTEX_MODEL_NAME e acesso no projeto. Erro: {last_error}"
"Nenhum modelo Vertex disponivel. Verifique ATENDIMENTO_MODEL_NAME/VERTEX_MODEL_NAME e o acesso no projeto. " f"Erro: {last_error}"
) from last_error
raise RuntimeError("Falha ao gerar resposta no Vertex AI.")

@ -1,6 +1,7 @@
import logging
import json
from app.core.settings import settings
from app.services.ai.llm_service import LLMService
from app.services.orchestration.entity_normalizer import EntityNormalizer
from app.services.orchestration.turn_decision import TurnDecision
@ -123,8 +124,7 @@ class MessagePlanner:
preferred_models = getattr(self.llm, "bundle_model_names", None)
bundle_generation_config = {
"candidate_count": 1,
"temperature": 0,
"max_output_tokens": 768,
**settings.build_atendimento_generation_config(),
}
for attempt in range(2):

@ -1,8 +1,10 @@
import importlib
import inspect
import json
import logging
from typing import Callable, Dict, List
from fastapi import HTTPException
from shared.contracts import GENERATED_TOOL_ENTRYPOINT, GENERATED_TOOLS_PACKAGE
from sqlalchemy.orm import Session
from app.models.tool_model import ToolDefinition
@ -23,7 +25,15 @@ from app.services.tools.handlers import (
realizar_pedido,
validar_cliente_venda,
)
from shared.contracts import (
GENERATED_TOOL_ENTRYPOINT,
GENERATED_TOOLS_PACKAGE,
ToolParameterType,
ToolRuntimePublicationManifest,
get_generated_tool_publication_manifest_path,
)
logger = logging.getLogger(__name__)
HANDLERS: Dict[str, Callable] = {
"consultar_estoque": consultar_estoque,
@ -42,16 +52,24 @@ HANDLERS: Dict[str, Callable] = {
"registrar_pagamento_aluguel": registrar_pagamento_aluguel,
}
# Maps shared ToolParameterType contract values to the JSON-schema type
# names used when building parameter schemas for published generated tools.
_PARAMETER_SCHEMA_TYPE_MAPPING = {
    ToolParameterType.STRING: "string",
    ToolParameterType.INTEGER: "integer",
    ToolParameterType.NUMBER: "number",
    ToolParameterType.BOOLEAN: "boolean",
    ToolParameterType.OBJECT: "object",
    ToolParameterType.ARRAY: "array",
}
class GeneratedToolCoreBoundaryViolation(RuntimeError):
    """Raised when a generated tool attempts to reuse or point at core runtime code.

    Subclasses RuntimeError so callers can treat the violation as a
    runtime failure of the generated-tool loading path.
    """
# Registry em memoria das tools disponiveis para o orquestrador.
class ToolRegistry:
"""Registry em memoria das tools disponiveis para o orquestrador."""
def __init__(self, db: Session, extra_handlers: Dict[str, Callable] | None = None):
"""Carrega tools do banco e registra apenas as que possuem handler conhecido."""
self._tools = []
available_handlers = dict(HANDLERS)
if extra_handlers:
@ -68,6 +86,7 @@ class ToolRegistry:
parameters=db_tool.parameters,
handler=handler,
)
self._load_generated_tool_publications_from_snapshot()
def register_tool(self, name, description, parameters, handler):
"""Registra uma tool em memoria para uso pelo orquestrador."""
@ -90,6 +109,65 @@ class ToolRegistry:
handler=handler,
)
def _load_generated_tool_publications_from_snapshot(self) -> None:
    """Register admin-published tools from the local publication manifest.

    Best-effort loading: a missing manifest is a no-op, a corrupt manifest
    aborts with a warning, and a single tool that fails to import or
    register is logged without breaking registry construction.
    """
    manifest_path = get_generated_tool_publication_manifest_path()
    if not manifest_path.exists():
        return
    try:
        # utf-8-sig tolerates a BOM possibly left by external tooling.
        manifest_payload = json.loads(manifest_path.read_text(encoding="utf-8-sig"))
        manifest = ToolRuntimePublicationManifest.model_validate(manifest_payload)
    except Exception as exc:
        logger.warning(
            "Falha ao carregar snapshot local de tools publicadas em %s: %s",
            manifest_path,
            exc,
        )
        return
    # Invalidate import-finder caches once before importing modules that may
    # have been written after interpreter startup; the previous per-envelope
    # call inside the loop was redundant.
    importlib.invalidate_caches()
    for envelope in manifest.publications:
        published_tool = envelope.published_tool
        try:
            module = importlib.import_module(published_tool.implementation_module)
            handler = getattr(module, published_tool.implementation_callable)
            self.register_generated_tool(
                name=published_tool.tool_name,
                description=published_tool.description,
                parameters=self._build_generated_parameter_schema(published_tool.parameters),
                handler=handler,
            )
        except Exception as exc:
            # Skip this publication but keep registering the remaining ones.
            logger.warning(
                "Falha ao registrar tool publicada '%s' a partir do snapshot local %s: %s",
                published_tool.tool_name,
                manifest_path,
                exc,
            )
@staticmethod
def _build_generated_parameter_schema(parameters) -> dict:
    """Translate published parameter contracts into a JSON-schema object."""
    schema_properties: dict[str, dict] = {}
    required_names: list[str] = []
    for contract in parameters or ():
        contract_type = contract.parameter_type
        entry = {
            "type": _PARAMETER_SCHEMA_TYPE_MAPPING[contract_type],
            "description": contract.description,
        }
        # Containers get the minimal extra schema the orchestrator expects.
        if contract_type == ToolParameterType.OBJECT:
            entry["additionalProperties"] = True
        elif contract_type == ToolParameterType.ARRAY:
            entry["items"] = {"type": "string"}
        schema_properties[contract.name] = entry
        if contract.required:
            required_names.append(contract.name)
    return {
        "type": "object",
        "properties": schema_properties,
        "required": required_names,
    }
def _append_tool_definition(self, *, name, description, parameters, handler):
self._tools.append(
ToolDefinition(

@ -1,4 +1,4 @@
"""Contratos compartilhados entre product e admin."""
"""Contratos compartilhados entre product e admin."""
from shared.contracts.access_control import (
AdminPermission,
@ -52,6 +52,7 @@ from shared.contracts.system_functional_configuration import (
from shared.contracts.tool_publication import (
GENERATED_TOOL_ENTRYPOINT,
GENERATED_TOOLS_PACKAGE,
GENERATED_TOOL_PUBLICATION_MANIFEST,
PublishedToolContract,
ServiceName,
TOOL_LIFECYCLE_STAGES,
@ -61,8 +62,12 @@ from shared.contracts.tool_publication import (
ToolParameterContract,
ToolParameterType,
ToolPublicationEnvelope,
ToolRuntimePublicationManifest,
build_generated_tool_file_path,
build_generated_tool_module_name,
build_generated_tool_module_path,
get_generated_tool_publication_manifest_path,
get_generated_tools_runtime_dir,
get_tool_lifecycle_stage,
)
@ -71,6 +76,7 @@ __all__ = [
"BOT_GOVERNED_SETTINGS",
"GENERATED_TOOL_ENTRYPOINT",
"GENERATED_TOOLS_PACKAGE",
"GENERATED_TOOL_PUBLICATION_MANIFEST",
"MODEL_RUNTIME_PROFILES",
"MODEL_RUNTIME_SEPARATION_RULES",
"PRODUCT_OPERATIONAL_DATASETS",
@ -85,6 +91,7 @@ __all__ = [
"ToolParameterContract",
"ToolParameterType",
"ToolPublicationEnvelope",
"ToolRuntimePublicationManifest",
"BotGovernanceArea",
"BotGovernanceMutability",
"BotGovernedSettingContract",
@ -109,10 +116,13 @@ __all__ = [
"FunctionalConfigurationMutability",
"FunctionalConfigurationPropagation",
"FunctionalConfigurationSource",
"build_generated_tool_file_path",
"build_generated_tool_module_name",
"build_generated_tool_module_path",
"get_bot_governed_setting",
"get_functional_configuration",
"get_generated_tool_publication_manifest_path",
"get_generated_tools_runtime_dir",
"get_model_runtime_contract",
"get_operational_dataset",
"get_tool_lifecycle_stage",

@ -1,7 +1,8 @@
from __future__ import annotations
from __future__ import annotations
from datetime import datetime
from enum import Enum
from pathlib import Path
import re
from pydantic import BaseModel, Field
@ -104,7 +105,9 @@ def get_tool_lifecycle_stage(
GENERATED_TOOLS_PACKAGE = "generated_tools"
GENERATED_TOOL_ENTRYPOINT = "run"
GENERATED_TOOL_PUBLICATION_MANIFEST = "published_runtime_tools.json"
_GENERATED_TOOL_NAME_PATTERN = re.compile(r"^[a-z][a-z0-9_]{2,63}$")
_PROJECT_ROOT = Path(__file__).resolve().parents[2]
def _normalize_generated_tool_name(tool_name: str) -> str:
@ -124,6 +127,26 @@ def build_generated_tool_module_path(tool_name: str) -> str:
return f"{GENERATED_TOOLS_PACKAGE}/{normalized}.py"
def get_generated_tools_runtime_dir(project_root: Path | None = None) -> Path:
    """Return the generated-tools runtime package directory under *project_root*.

    Defaults to the repository root inferred from this module's location.
    """
    base = _PROJECT_ROOT if project_root is None else project_root
    return base / GENERATED_TOOLS_PACKAGE
def build_generated_tool_file_path(
    tool_name: str,
    *,
    project_root: Path | None = None,
) -> Path:
    """Return the on-disk module path for a generated tool, validating its name."""
    safe_name = _normalize_generated_tool_name(tool_name)
    runtime_dir = get_generated_tools_runtime_dir(project_root)
    return runtime_dir / f"{safe_name}.py"
def get_generated_tool_publication_manifest_path(
    project_root: Path | None = None,
) -> Path:
    """Return the publication-manifest path inside the runtime package."""
    runtime_dir = get_generated_tools_runtime_dir(project_root)
    return runtime_dir / GENERATED_TOOL_PUBLICATION_MANIFEST
class ToolParameterType(str, Enum):
STRING = "string"
INTEGER = "integer"
@ -160,3 +183,10 @@ class ToolPublicationEnvelope(BaseModel):
publication_id: str
published_tool: PublishedToolContract
emitted_at: datetime
class ToolRuntimePublicationManifest(BaseModel):
    """Snapshot manifest of admin-published tools consumed by the product runtime."""

    # The admin service emits the manifest; the product service consumes it.
    source_service: ServiceName = ServiceName.ADMIN
    target_service: ServiceName = ServiceName.PRODUCT
    # Timestamp of when the snapshot was written.
    emitted_at: datetime
    # Publication envelopes; empty tuple before the first publication.
    publications: tuple[ToolPublicationEnvelope, ...] = ()

@ -1,4 +1,4 @@
import unittest
import unittest
from unittest.mock import MagicMock, patch
from admin_app.db import bootstrap as bootstrap_module
@ -50,6 +50,7 @@ class AdminBootstrapRuntimeTests(unittest.TestCase):
[
"ALTER TABLE tool_drafts ADD COLUMN current_version_number INT NOT NULL DEFAULT 1",
"ALTER TABLE tool_drafts ADD COLUMN version_count INT NOT NULL DEFAULT 1",
"ALTER TABLE tool_drafts ADD COLUMN generation_model VARCHAR(120)",
],
)

@ -1,4 +1,4 @@
import unittest
import unittest
from admin_app.db.models import ToolDraft
from shared.contracts import ToolLifecycleStatus
@ -19,6 +19,7 @@ class ToolDraftModelTests(unittest.TestCase):
self.assertIn("required_parameter_count", ToolDraft.__table__.columns)
self.assertIn("current_version_number", ToolDraft.__table__.columns)
self.assertIn("version_count", ToolDraft.__table__.columns)
self.assertIn("generation_model", ToolDraft.__table__.columns)
self.assertIn("requires_director_approval", ToolDraft.__table__.columns)
self.assertIn("owner_staff_account_id", ToolDraft.__table__.columns)
self.assertIn("owner_display_name", ToolDraft.__table__.columns)

@ -0,0 +1,180 @@
import threading
import unittest
from types import SimpleNamespace
from unittest.mock import Mock, patch
from admin_app.core import AdminSettings
from admin_app.services.tool_generation_worker_service import ToolGenerationWorkerService
from admin_app.services.tool_management_service import ToolManagementService
from shared.contracts import StaffRole, ToolLifecycleStatus
class ToolGenerationWorkerServiceTests(unittest.TestCase):
    """Tests for the dedicated tool-generation worker service."""

    def test_execute_generation_pipeline_uses_dedicated_worker_metadata(self):
        """Synchronous execution runs the job off the main thread and returns
        execution metadata describing the dedicated worker."""
        worker = ToolGenerationWorkerService(AdminSettings())
        main_thread_name = threading.current_thread().name
        def fake_job(version_id, runner_staff_account_id, runner_name, runner_role):
            # The job must run on a worker thread, never on the caller's thread.
            self.assertNotEqual(threading.current_thread().name, main_thread_name)
            self.assertEqual(version_id, 'tool_version::resumo::v1')
            self.assertEqual(runner_staff_account_id, 7)
            self.assertEqual(runner_name, 'Diretoria')
            self.assertEqual(runner_role, StaffRole.DIRETOR)
            return {
                'service': 'admin_tool_governance',
                'version_id': version_id,
                'status': 'generated',
                '_worker_thread_name': threading.current_thread().name,
            }
        try:
            with patch.object(worker, '_run_generation_pipeline_job', side_effect=fake_job):
                payload = worker.execute_generation_pipeline(
                    version_id='tool_version::resumo::v1',
                    runner_staff_account_id=7,
                    runner_name='Diretoria',
                    runner_role=StaffRole.DIRETOR,
                )
        finally:
            # Always stop the worker threads, even if an assertion fails.
            worker.shutdown(wait=True)
        execution = payload['execution']
        self.assertEqual(execution['mode'], 'dedicated_generation_worker')
        self.assertEqual(execution['target'], 'admin_tool_generation_worker')
        self.assertEqual(execution['dispatch_state'], 'completed')
        self.assertEqual(execution['worker_max_workers'], 1)
        self.assertEqual(execution['queued_jobs_before_submit'], 0)
        self.assertEqual(execution['worker_pending_jobs'], 0)
        self.assertIsNotNone(execution['submitted_at'])
        self.assertGreaterEqual(execution['elapsed_ms'], 0)
        self.assertTrue(execution['worker_thread_name'].startswith('admin-tool-generation-worker'))
        self.assertEqual(payload['version_id'], 'tool_version::resumo::v1')
        self.assertEqual(payload['status'], 'generated')

    def test_dispatch_generation_pipeline_returns_queued_snapshot_without_waiting_completion(self):
        """Async dispatch returns immediately with a queued/running snapshot
        instead of blocking until the job finishes."""
        worker = ToolGenerationWorkerService(AdminSettings())
        job_started = threading.Event()
        release_job = threading.Event()
        def fake_job(version_id, runner_staff_account_id, runner_name, runner_role):
            job_started.set()
            # Hold the job open so dispatch state can be observed mid-flight.
            release_job.wait(timeout=2)
            return {
                'service': 'admin_tool_governance',
                'version_id': version_id,
                'status': 'generated',
                '_worker_thread_name': threading.current_thread().name,
            }
        try:
            with patch.object(worker, '_run_generation_pipeline_job', side_effect=fake_job):
                dispatch = worker.dispatch_generation_pipeline(
                    version_id='tool_version::assinc::v1',
                    runner_staff_account_id=9,
                    runner_name='Diretoria',
                    runner_role=StaffRole.DIRETOR,
                )
                self.assertEqual(dispatch['mode'], 'dedicated_generation_worker_async')
                self.assertIn(dispatch['dispatch_state'], {'queued', 'running'})
                self.assertEqual(dispatch['target'], 'admin_tool_generation_worker')
                self.assertEqual(dispatch['queued_jobs_before_submit'], 0)
                self.assertGreaterEqual(dispatch['poll_after_ms'], 1)
                job_started.wait(timeout=2)
                running_snapshot = worker.get_generation_pipeline_dispatch('tool_version::assinc::v1')
                self.assertIsNotNone(running_snapshot)
                self.assertIn(running_snapshot['dispatch_state'], {'running', 'completed'})
                release_job.set()
                # NOTE(review): release_job was set on the previous line, so this
                # wait returns immediately; it was probably meant to wait for the
                # job future to complete -- confirm intent.
                release_job.wait(timeout=2)
        finally:
            worker.shutdown(wait=True)
class ToolManagementServiceWorkerFallbackTests(unittest.TestCase):
    """Tests for worker dispatch and inline fallback in ToolManagementService."""

    def test_run_generation_pipeline_in_worker_falls_back_to_inline_execution_metadata(self):
        """Without a dedicated worker, the service runs the pipeline inline
        and reports inline execution metadata."""
        service = ToolManagementService(settings=AdminSettings())
        with patch.object(
            service,
            'run_generation_pipeline',
            return_value={
                'service': 'admin_tool_governance',
                'message': 'ok',
                'version_id': 'tool_version::inline::v1',
                'status': 'generated',
            },
        ) as run_generation_pipeline:
            payload = service.run_generation_pipeline_in_worker(
                'tool_version::inline::v1',
                runner_staff_account_id=4,
                runner_name='Colaborador',
                runner_role=StaffRole.COLABORADOR,
            )
        # The inline path must delegate to run_generation_pipeline exactly once.
        run_generation_pipeline.assert_called_once_with(
            'tool_version::inline::v1',
            runner_staff_account_id=4,
            runner_name='Colaborador',
            runner_role=StaffRole.COLABORADOR,
        )
        self.assertEqual(payload['execution']['mode'], 'inline_admin_service')
        self.assertEqual(payload['execution']['target'], 'admin_inline_generation_pipeline')
        self.assertEqual(payload['execution']['dispatch_state'], 'completed')
        self.assertEqual(payload['execution']['queued_jobs_before_submit'], 0)
        self.assertIsNone(payload['execution']['worker_max_workers'])
        self.assertEqual(payload['version_id'], 'tool_version::inline::v1')

    def test_run_generation_pipeline_in_worker_returns_queued_snapshot_when_dedicated_worker_accepts_job(self):
        """With a dedicated worker, dispatch surfaces a queued snapshot and a
        review-queue entry gated on the pending pipeline."""
        # Minimal DRAFT version fixture so the queued gate applies.
        version = SimpleNamespace(
            id=11,
            version_id='tool_version::fila::v1',
            tool_name='emitir_resumo_locacao',
            version_number=1,
            status=ToolLifecycleStatus.DRAFT,
            summary='Resumo governado em fila.',
            owner_display_name='Diretoria',
            updated_at=None,
            created_at=None,
        )
        draft_repository = Mock()
        draft_repository.get_by_tool_name.return_value = SimpleNamespace(id=3)
        version_repository = Mock()
        version_repository.get_by_version_id.return_value = version
        version_repository.list_versions.return_value = [version]
        metadata_repository = Mock()
        metadata_repository.get_by_tool_version_id.return_value = SimpleNamespace(display_name='Emitir resumo locacao')
        worker = Mock()
        # Simulated async dispatch snapshot returned by the dedicated worker.
        worker.dispatch_generation_pipeline.return_value = {
            'mode': 'dedicated_generation_worker_async',
            'target': 'admin_tool_generation_worker',
            'dispatch_state': 'queued',
            'worker_max_workers': 1,
            'worker_pending_jobs': 1,
            'queued_jobs_before_submit': 0,
            'submitted_at': '2026-04-02T10:00:00+00:00',
            'started_at': None,
            'completed_at': None,
            'elapsed_ms': None,
            'worker_thread_name': None,
            'poll_after_ms': 1200,
            'last_error': None,
        }
        service = ToolManagementService(
            settings=AdminSettings(),
            draft_repository=draft_repository,
            version_repository=version_repository,
            metadata_repository=metadata_repository,
            tool_generation_worker_service=worker,
        )
        payload = service.run_generation_pipeline_in_worker(
            'tool_version::fila::v1',
            runner_staff_account_id=1,
            runner_name='Diretoria',
            runner_role=StaffRole.DIRETOR,
        )
        # Queued dispatch: version stays DRAFT and the queue entry is gated.
        self.assertEqual(payload['status'], ToolLifecycleStatus.DRAFT)
        self.assertEqual(payload['execution']['dispatch_state'], 'queued')
        self.assertEqual(payload['queue_entry']['gate'], 'generation_pipeline_queued')
        self.assertEqual(payload['queue_entry']['automated_validation_status'], 'pending')
        self.assertIn('request foi liberada', payload['message'])

@ -1,3 +1,4 @@
import asyncio
import unittest
from unittest.mock import patch
from datetime import datetime, timezone
@ -64,6 +65,7 @@ class _FakeToolDraftRepository:
version_count: int,
owner_staff_account_id: int,
owner_display_name: str,
generation_model: str | None = None,
requires_director_approval: bool = True,
commit: bool = True,
) -> ToolDraft:
@ -82,6 +84,7 @@ class _FakeToolDraftRepository:
required_parameter_count=required_parameter_count,
current_version_number=current_version_number,
version_count=version_count,
generation_model=generation_model,
requires_director_approval=requires_director_approval,
owner_staff_account_id=owner_staff_account_id,
owner_display_name=owner_display_name,
@ -107,6 +110,7 @@ class _FakeToolDraftRepository:
version_count: int,
owner_staff_account_id: int,
owner_display_name: str,
generation_model: str | None = None,
requires_director_approval: bool = True,
commit: bool = True,
) -> ToolDraft:
@ -120,6 +124,7 @@ class _FakeToolDraftRepository:
draft.required_parameter_count = required_parameter_count
draft.current_version_number = current_version_number
draft.version_count = version_count
draft.generation_model = generation_model
draft.requires_director_approval = requires_director_approval
draft.owner_staff_account_id = owner_staff_account_id
draft.owner_display_name = owner_display_name
@ -180,6 +185,7 @@ class _FakeToolVersionRepository:
required_parameter_count: int,
owner_staff_account_id: int,
owner_display_name: str,
generation_model: str | None = None,
status: ToolLifecycleStatus = ToolLifecycleStatus.DRAFT,
requires_director_approval: bool = True,
commit: bool = True,
@ -197,6 +203,7 @@ class _FakeToolVersionRepository:
business_goal=business_goal,
parameters_json=parameters_json,
required_parameter_count=required_parameter_count,
generation_model=generation_model,
requires_director_approval=requires_director_approval,
owner_staff_account_id=owner_staff_account_id,
owner_display_name=owner_display_name,
@ -384,6 +391,9 @@ class AdminToolManagementServiceTests(unittest.TestCase):
artifact_repository=self.artifact_repository,
)
def _run_async(self, awaitable):
# Helper: drive *awaitable* to completion on a fresh event loop and return its result.
return asyncio.run(awaitable)
def test_create_draft_submission_persists_initial_tool_version_metadata_and_artifacts(self):
payload = self.service.create_draft_submission(
{
@ -392,6 +402,7 @@ class AdminToolManagementServiceTests(unittest.TestCase):
"display_name": "Consultar vendas por periodo",
"description": "Consulta vendas consolidadas por periodo informado no painel.",
"business_goal": "Ajudar o time interno a acompanhar o desempenho comercial com mais agilidade.",
"generation_model": "gemini-2.5-pro",
"parameters": [
{
"name": "periodo_inicio",
@ -422,11 +433,14 @@ class AdminToolManagementServiceTests(unittest.TestCase):
self.assertEqual(payload["draft_preview"]["draft_id"], "draft_fake_1")
self.assertEqual(payload["draft_preview"]["version_id"], "tool_version::consultar_vendas_periodo::v1")
self.assertEqual(payload["draft_preview"]["version_number"], 1)
self.assertEqual(payload["draft_preview"]["generation_model"], "gemini-2.5-pro")
self.assertEqual(payload["draft_preview"]["version_count"], 1)
self.assertEqual(payload["draft_preview"]["status"], ToolLifecycleStatus.DRAFT)
self.assertEqual(payload["draft_preview"]["owner_name"], "Equipe Interna")
self.assertEqual(len(self.draft_repository.drafts), 1)
self.assertEqual(len(self.version_repository.versions), 1)
self.assertEqual(self.draft_repository.drafts[0].generation_model, "gemini-2.5-pro")
self.assertEqual(self.version_repository.versions[0].generation_model, "gemini-2.5-pro")
self.assertEqual(len(self.metadata_repository.metadata_entries), 1)
self.assertEqual(self.metadata_repository.metadata_entries[0].author_display_name, "Equipe Interna")
self.assertEqual(self.metadata_repository.metadata_entries[0].version_number, 1)
@ -1307,6 +1321,126 @@ class AdminToolManagementServiceTests(unittest.TestCase):
self.assertTrue(restored_publication["deactivation_action_available"])
class AdminToolManagementTransactionalPersistenceTests(unittest.TestCase):
def test_publish_and_deactivate_keep_local_runtime_snapshot_for_product(self):
"""End-to-end lifecycle check: publishing a version must write the local runtime
snapshot (JSON manifest plus generated module file) that the product service
consumes, and deactivating the version must empty the manifest's publications
list while the rest of the snapshot directory stays in place."""
import json
import shutil
from pathlib import Path
from unittest.mock import patch
# Wire the service against in-memory fake repositories so no database is required.
draft_repository = _FakeToolDraftRepository()
version_repository = _FakeToolVersionRepository()
metadata_repository = _FakeToolMetadataRepository()
artifact_repository = _FakeToolArtifactRepository()
service = ToolManagementService(
settings=AdminSettings(admin_api_prefix="/admin"),
draft_repository=draft_repository,
version_repository=version_repository,
metadata_repository=metadata_repository,
artifact_repository=artifact_repository,
)
# Create the draft submission that seeds the tool version under test.
intake_payload = service.create_draft_submission(
{
"domain": "locacao",
"tool_name": "emitir_resumo_locacao",
"display_name": "Emitir resumo de locacao",
"description": "Resume uma locacao ativa com dados importantes para o atendimento.",
"business_goal": "Permitir que a equipe gere um resumo operacional de locacao sem acessar o core.",
"parameters": [
{
"name": "reserva_id",
"parameter_type": ToolParameterType.STRING,
"description": "Identificador da reserva que sera resumida.",
"required": True,
}
],
},
owner_staff_account_id=7,
owner_name="Equipe Interna",
owner_role=StaffRole.COLABORADOR,
)
version_id = intake_payload["draft_preview"]["version_id"]
# Drive the version through the governed lifecycle up to approval:
# generation pipeline -> human review -> director approval.
service.run_generation_pipeline(
version_id,
runner_staff_account_id=7,
runner_name="Equipe Interna",
runner_role=StaffRole.COLABORADOR,
)
service.review_version(
version_id,
reviewer_staff_account_id=99,
reviewer_name="Diretoria",
reviewer_role=StaffRole.DIRETOR,
decision_notes="Revisao humana registrada para liberacao controlada.",
reviewed_generated_code=True,
)
service.approve_version(
version_id,
approver_staff_account_id=99,
approver_name="Diretoria",
approver_role=StaffRole.DIRETOR,
decision_notes="Aprovacao formal liberada para publicacao governada.",
)
# Sandbox directory under cwd; removed up-front in case a previous run leaked it.
sandbox_root = Path.cwd() / ".tmp_test_admin_runtime_snapshot"
shutil.rmtree(sandbox_root, ignore_errors=True)
package_dir = sandbox_root / GENERATED_TOOLS_PACKAGE
manifest_path = package_dir / "published_runtime_tools.json"
def build_file_path(tool_name: str):
# Mirrors the real path builder but rooted at the sandbox package dir.
return package_dir / f"{tool_name}.py"
try:
# Redirect all snapshot path helpers into the sandbox so publish/deactivate
# write only under sandbox_root.
with patch(
"admin_app.services.tool_management_service.get_generated_tools_runtime_dir",
return_value=package_dir,
), patch(
"admin_app.services.tool_management_service.get_generated_tool_publication_manifest_path",
return_value=manifest_path,
), patch(
"admin_app.services.tool_management_service.build_generated_tool_file_path",
side_effect=build_file_path,
):
service.publish_version(
version_id,
publisher_staff_account_id=99,
publisher_name="Diretoria",
publisher_role=StaffRole.DIRETOR,
)
# Publishing must emit a product-targeted manifest with exactly one entry...
manifest = json.loads(manifest_path.read_text(encoding="utf-8"))
self.assertEqual(manifest["target_service"], "product")
self.assertEqual(len(manifest["publications"]), 1)
self.assertEqual(
manifest["publications"][0]["published_tool"]["tool_name"],
"emitir_resumo_locacao",
)
self.assertEqual(
manifest["publications"][0]["published_tool"]["status"],
ToolLifecycleStatus.ACTIVE.value,
)
# ...and materialize the generated async implementation module on disk.
self.assertTrue(build_file_path("emitir_resumo_locacao").exists())
self.assertIn(
"async def run",
build_file_path("emitir_resumo_locacao").read_text(encoding="utf-8"),
)
service.deactivate_version(
version_id,
actor_staff_account_id=99,
actor_name="Diretoria",
actor_role=StaffRole.DIRETOR,
decision_notes="Desativacao controlada para manter apenas o snapshot local anterior.",
)
# Deactivation clears the publications list in the local snapshot.
manifest_after = json.loads(manifest_path.read_text(encoding="utf-8"))
self.assertEqual(manifest_after["publications"], [])
finally:
shutil.rmtree(sandbox_root, ignore_errors=True)
def setUp(self):
self.engine = create_engine("sqlite:///:memory:")
AdminBase.metadata.create_all(bind=self.engine)
@ -1371,3 +1505,46 @@ class AdminToolManagementTransactionalPersistenceTests(unittest.TestCase):
if __name__ == "__main__":
unittest.main()
class AdminToolManagementWorkerDispatchTests(unittest.TestCase):
    """Covers the worker-dispatch path of the admin tool management service."""

    def test_run_generation_pipeline_in_worker_falls_back_to_inline_execution_metadata(self):
        """When no worker service is wired in, the pipeline runs inline and the
        payload is annotated with inline execution metadata."""
        management_service = ToolManagementService(settings=AdminSettings(admin_api_prefix="/admin"))
        # Payload the (patched) inline pipeline will hand back.
        queue_entry = {
            "entry_id": "tool_version::teste::v1",
            "version_id": "tool_version::teste::v1",
            "version_number": 1,
            "tool_name": "teste",
            "display_name": "Tool teste",
            "status": ToolLifecycleStatus.GENERATED,
            "gate": "validation_required",
            "summary": "Resumo",
        }
        stubbed_payload = {
            "message": "Pipeline executado.",
            "version_id": "tool_version::teste::v1",
            "tool_name": "teste",
            "version_number": 1,
            "status": ToolLifecycleStatus.GENERATED,
            "current_step": "validation",
            "steps": [],
            "queue_entry": queue_entry,
            "automated_validations": [],
            "next_steps": [],
        }
        with patch.object(
            management_service, "run_generation_pipeline", return_value=stubbed_payload
        ) as pipeline_mock:
            result = management_service.run_generation_pipeline_in_worker(
                "tool_version::teste::v1",
                runner_staff_account_id=7,
                runner_name="Equipe Interna",
                runner_role=StaffRole.COLABORADOR,
            )
        # The worker entry point must delegate once, forwarding every argument as-is.
        pipeline_mock.assert_called_once_with(
            "tool_version::teste::v1",
            runner_staff_account_id=7,
            runner_name="Equipe Interna",
            runner_role=StaffRole.COLABORADOR,
        )
        # Fallback execution metadata marks the run as inline, not queued.
        self.assertEqual(result["execution"]["mode"], "inline_admin_service")
        self.assertEqual(result["execution"]["target"], "admin_inline_generation_pipeline")

@ -1,4 +1,4 @@
import unittest
import unittest
from admin_app.db.models import ToolVersion
from shared.contracts import ToolLifecycleStatus
@ -17,6 +17,7 @@ class ToolVersionModelTests(unittest.TestCase):
self.assertIn("business_goal", ToolVersion.__table__.columns)
self.assertIn("parameters_json", ToolVersion.__table__.columns)
self.assertIn("required_parameter_count", ToolVersion.__table__.columns)
self.assertIn("generation_model", ToolVersion.__table__.columns)
self.assertIn("requires_director_approval", ToolVersion.__table__.columns)
self.assertIn("owner_staff_account_id", ToolVersion.__table__.columns)
self.assertIn("owner_display_name", ToolVersion.__table__.columns)

@ -1,4 +1,4 @@
import os
import os
import unittest
from datetime import datetime, timedelta
from app.core.time_utils import utc_now
@ -3702,5 +3702,119 @@ class ToolRegistryExecutionTests(unittest.IsolatedAsyncioTestCase):
)
def test_registry_loads_generated_tool_from_local_publication_snapshot(self):
    """A valid publication manifest makes the registry expose the generated
    tool and execute its implementation module loaded from disk."""
    import asyncio
    import json
    import shutil
    import sys
    from pathlib import Path
    from unittest.mock import patch

    # Build a throwaway package dir containing the generated tool module.
    workspace = Path.cwd() / ".tmp_test_registry_snapshot_valid"
    shutil.rmtree(workspace, ignore_errors=True)
    tools_pkg = workspace / "generated_tools"
    tools_pkg.mkdir(parents=True, exist_ok=True)
    (tools_pkg / "__init__.py").write_text("", encoding="utf-8")
    (tools_pkg / "emitir_resumo_locacao.py").write_text(
        "async def run(reserva_id: str):\n return {\"reserva_id\": reserva_id, \"status\": \"ok\"}\n",
        encoding="utf-8",
    )
    # Emit a well-formed publication manifest pointing at that module.
    published_tool = {
        "tool_name": "emitir_resumo_locacao",
        "display_name": "Emitir resumo de locacao",
        "description": "Gera um resumo curto da locacao.",
        "version": 1,
        "status": "active",
        "parameters": [
            {
                "name": "reserva_id",
                "parameter_type": "string",
                "description": "Identificador da reserva.",
                "required": True,
            }
        ],
        "implementation_module": "generated_tools.emitir_resumo_locacao",
        "implementation_callable": "run",
    }
    manifest_file = tools_pkg / "published_runtime_tools.json"
    manifest_file.write_text(
        json.dumps(
            {
                "source_service": "admin",
                "target_service": "product",
                "emitted_at": "2026-04-02T12:00:00+00:00",
                "publications": [
                    {
                        "source_service": "admin",
                        "target_service": "product",
                        "publication_id": "metadata::emitir_resumo_locacao::v1",
                        "emitted_at": "2026-04-02T12:00:00+00:00",
                        "published_tool": published_tool,
                    }
                ],
            },
            ensure_ascii=True,
        ),
        encoding="utf-8",
    )
    # Bare registry (no __init__ side effects) with an empty tool list.
    registry_under_test = ToolRegistry.__new__(ToolRegistry)
    registry_under_test._tools = []
    # Make the sandbox importable and drop any cached generated_tools modules.
    sys.path.insert(0, str(workspace))
    sys.modules.pop("generated_tools", None)
    sys.modules.pop("generated_tools.emitir_resumo_locacao", None)
    try:
        with patch(
            "app.services.tools.tool_registry.get_generated_tool_publication_manifest_path",
            return_value=manifest_file,
        ):
            registry_under_test._load_generated_tool_publications_from_snapshot()
        self.assertEqual(
            [tool.name for tool in registry_under_test.get_tools()],
            ["emitir_resumo_locacao"],
        )
        execution_result = asyncio.run(
            registry_under_test.execute("emitir_resumo_locacao", {"reserva_id": "LOC-1"})
        )
        self.assertEqual(execution_result["reserva_id"], "LOC-1")
    finally:
        # Undo sys.path/sys.modules pollution and remove the sandbox.
        if str(workspace) in sys.path:
            sys.path.remove(str(workspace))
        sys.modules.pop("generated_tools", None)
        sys.modules.pop("generated_tools.emitir_resumo_locacao", None)
        shutil.rmtree(workspace, ignore_errors=True)
def test_registry_ignores_invalid_publication_snapshot_and_keeps_existing_tools(self):
    """A corrupt manifest must be ignored: tools already registered stay as-is."""
    import shutil
    import sys
    from pathlib import Path
    from unittest.mock import patch

    async def core_tool(**kwargs):
        # Stand-in handler for a pre-existing core tool.
        return kwargs

    workspace = Path.cwd() / ".tmp_test_registry_snapshot_invalid"
    shutil.rmtree(workspace, ignore_errors=True)
    workspace.mkdir(parents=True, exist_ok=True)
    # Deliberately unparseable manifest content.
    broken_manifest = workspace / "published_runtime_tools.json"
    broken_manifest.write_text("{invalid json", encoding="utf-8")
    registry_under_test = ToolRegistry.__new__(ToolRegistry)
    existing_definition = ToolDefinition(
        name="consultar_estoque",
        description="",
        parameters={},
        handler=core_tool,
    )
    registry_under_test._tools = [existing_definition]
    sys.modules.pop("generated_tools", None)
    try:
        with patch(
            "app.services.tools.tool_registry.get_generated_tool_publication_manifest_path",
            return_value=broken_manifest,
        ):
            registry_under_test._load_generated_tool_publications_from_snapshot()
        # The invalid snapshot leaves the registry contents untouched.
        self.assertEqual(
            [tool.name for tool in registry_under_test.get_tools()],
            ["consultar_estoque"],
        )
    finally:
        sys.modules.pop("generated_tools", None)
        shutil.rmtree(workspace, ignore_errors=True)
if __name__ == "__main__":
unittest.main()

@ -1,9 +1,11 @@
import os
import unittest
from types import SimpleNamespace
from unittest.mock import patch
os.environ.setdefault("DEBUG", "false")
from app.core.settings import Settings
from app.services.ai.llm_service import (
INVALID_RECEIPT_WATERMARK_MESSAGE,
VALID_RECEIPT_WATERMARK_MARKER,
@ -82,6 +84,57 @@ class LLMServiceResponseParsingTests(unittest.TestCase):
self.assertEqual(payload, {"response": '{"ok": true}', "tool_call": None})
class LLMServiceRuntimeConfigurationTests(unittest.TestCase):
    """Checks how the LLMService constructor resolves runtime model names."""

    def setUp(self):
        # Snapshot LLMService class-level caches, then reset them so each test
        # observes a cold constructor.
        self._saved_vertex_initialized = LLMService._vertex_initialized
        self._saved_models = dict(LLMService._models)
        self._saved_vertex_tools_cache = dict(LLMService._vertex_tools_cache)
        LLMService._vertex_initialized = False
        LLMService._models = {}
        LLMService._vertex_tools_cache = {}

    def tearDown(self):
        # Restore the snapshotted class-level state for other test classes.
        LLMService._vertex_initialized = self._saved_vertex_initialized
        LLMService._models = self._saved_models
        LLMService._vertex_tools_cache = self._saved_vertex_tools_cache

    def test_constructor_prefers_explicit_atendimento_runtime_models(self):
        """Explicit atendimento model settings win over the legacy vertex names."""
        configured = Settings(
            google_project_id="test-project",
            google_location="us-central1",
            atendimento_model_name="gemini-atendimento",
            atendimento_bundle_model_name="gemini-atendimento-bundle",
            vertex_model_name="legacy-runtime",
            vertex_bundle_model_name="legacy-bundle",
        )
        with patch("app.services.ai.llm_service.settings", configured), patch(
            "app.services.ai.llm_service.vertexai.init"
        ) as vertex_init:
            llm = LLMService()
        vertex_init.assert_called_once_with(project="test-project", location="us-central1")
        self.assertEqual(llm.model_names[0], "gemini-atendimento")
        self.assertEqual(llm.bundle_model_names[0], "gemini-atendimento-bundle")
        self.assertIn("gemini-2.5-pro", llm.model_names)

    def test_constructor_falls_back_to_legacy_vertex_runtime_model_names(self):
        """Without atendimento-specific settings, the legacy vertex names apply."""
        configured = Settings(
            google_project_id="test-project",
            google_location="us-central1",
            vertex_model_name="legacy-runtime",
            vertex_bundle_model_name="legacy-bundle",
        )
        with patch("app.services.ai.llm_service.settings", configured), patch(
            "app.services.ai.llm_service.vertexai.init"
        ):
            llm = LLMService()
        self.assertEqual(llm.model_names[0], "legacy-runtime")
        self.assertEqual(llm.bundle_model_names[0], "legacy-bundle")
class LLMServiceImageWorkflowPromptTests(unittest.TestCase):
def test_build_image_workflow_prompt_preserves_visible_payment_time(self):
service = LLMService.__new__(LLMService)
@ -122,6 +175,7 @@ class LLMServiceImageWorkflowPromptTests(unittest.TestCase):
"Registrar pagamento de aluguel: contrato LOC-20260319-33CD6567; valor R$ 379,80.",
)
class LLMServiceDispatchTests(unittest.IsolatedAsyncioTestCase):
async def test_generate_response_uses_generate_content_when_history_is_empty(self):
service = LLMService.__new__(LLMService)
@ -203,5 +257,4 @@ class LLMServiceDispatchTests(unittest.IsolatedAsyncioTestCase):
self.assertEqual(payload, {"response": "ok", "tool_call": None})
self.assertEqual(model.histories, [history])
self.assertEqual(model.chat.calls, [("teste", {})])
self.assertEqual(model.chat.calls, [("teste", {})])

@ -1,3 +1,4 @@
import json
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
@ -54,12 +55,26 @@ class BootstrapRuntimeTests(unittest.TestCase):
mock_create_all.assert_called_once()
def test_ensure_generated_tools_runtime_package_creates_package_files(self):
    """Bootstrapping must create the generated_tools package dir with an
    __init__.py and an empty product-targeted publication manifest.

    Fix: the block contained stale pre-edit lines (the old TemporaryDirectory/
    patch pair) interleaved with the new sandbox-root implementation — a diff
    artifact that left two conflicting setups. Only the coherent new-side
    implementation is kept.
    """
    import shutil

    # Sandbox under cwd; cleared up-front in case a previous run leaked it.
    sandbox_root = Path.cwd() / ".tmp_test_runtime_bootstrap"
    shutil.rmtree(sandbox_root, ignore_errors=True)
    sandbox_root.mkdir(parents=True, exist_ok=True)
    try:
        # Point the bootstrap at the sandbox so it writes only there.
        with patch.object(bootstrap_module, "_PROJECT_ROOT", sandbox_root):
            package_dir = bootstrap_module._ensure_generated_tools_runtime_package()
        self.assertEqual(package_dir.name, "generated_tools")
        self.assertTrue(package_dir.exists())
        self.assertTrue((package_dir / "__init__.py").exists())
        # A fresh bootstrap emits an empty manifest targeted at the product service.
        manifest_path = package_dir / "published_runtime_tools.json"
        self.assertTrue(manifest_path.exists())
        manifest = json.loads(manifest_path.read_text(encoding="utf-8"))
        self.assertEqual(manifest["target_service"], "product")
        self.assertEqual(manifest["publications"], [])
    finally:
        shutil.rmtree(sandbox_root, ignore_errors=True)
@patch.object(bootstrap_module, "seed_tools")
@patch.object(bootstrap_module, "seed_mock_data")
@ -128,3 +143,4 @@ class HttpStartupTests(unittest.IsolatedAsyncioTestCase):
if __name__ == "__main__":
unittest.main()

@ -1,4 +1,4 @@
import os
import os
import unittest
from types import SimpleNamespace
from unittest.mock import AsyncMock, patch

Loading…
Cancel
Save