Compare commits

...

2 Commits

@ -1,3 +1,5 @@
import threading
from fastapi import Depends, HTTPException, Request, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from sqlalchemy.orm import Session
@ -24,12 +26,18 @@ from admin_app.services import (
AuditService,
AuthService,
CollaboratorManagementService,
ToolGenerationService,
ToolGenerationWorkerService,
ToolManagementService,
)
from shared.contracts import AdminPermission, StaffRole, permissions_for_role, role_has_permission, role_includes
bearer_scheme = HTTPBearer(auto_error=False)
_tool_generation_worker_lock = threading.Lock()
_tool_generation_worker_service: ToolGenerationWorkerService | None = None
_tool_generation_worker_config: tuple[int, str, str, int, int, float] | None = None
def get_settings(request: Request) -> AdminSettings:
app_settings = getattr(request.app.state, "admin_settings", None)
@ -106,12 +114,49 @@ def get_collaborator_management_service(
)
def get_tool_generation_service(
    settings: AdminSettings = Depends(get_settings),
) -> ToolGenerationService:
    """Instantiate the isolated LLM generation service of the admin runtime.

    Completely separate from the product LLMService (app.services.ai.llm_service).
    Uses the admin_tool_generation_model / admin_tool_generation_fallback_model settings.
    Mapped to the tool_generation_runtime_profile of the model_runtime_separation contract.
    """
    return ToolGenerationService(settings)
def get_tool_generation_worker_service(
    settings: AdminSettings = Depends(get_settings),
) -> ToolGenerationWorkerService:
    """Return a process-wide worker-service singleton, rebuilt when settings change.

    The worker outlives individual requests; the module-level lock guards the
    lazily created singleton against concurrent requests racing to build it.
    """
    global _tool_generation_worker_service, _tool_generation_worker_config
    # Fingerprint of every setting the worker depends on; a change in any of
    # them invalidates the cached instance.
    config = (
        int(settings.admin_tool_generation_worker_max_workers),
        str(settings.admin_tool_generation_model),
        str(settings.admin_tool_generation_fallback_model),
        int(settings.admin_tool_generation_timeout_seconds),
        int(settings.admin_tool_generation_max_output_tokens),
        float(settings.admin_tool_generation_temperature),
    )
    with _tool_generation_worker_lock:
        if _tool_generation_worker_service is None or _tool_generation_worker_config != config:
            if _tool_generation_worker_service is not None:
                # Tear down the stale worker without blocking this request.
                _tool_generation_worker_service.shutdown(wait=False)
            _tool_generation_worker_service = ToolGenerationWorkerService(settings)
            _tool_generation_worker_config = config
        return _tool_generation_worker_service
def get_tool_management_service(
settings: AdminSettings = Depends(get_settings),
draft_repository: ToolDraftRepository = Depends(get_tool_draft_repository),
version_repository: ToolVersionRepository = Depends(get_tool_version_repository),
metadata_repository: ToolMetadataRepository = Depends(get_tool_metadata_repository),
artifact_repository: ToolArtifactRepository = Depends(get_tool_artifact_repository),
tool_generation_service: ToolGenerationService = Depends(get_tool_generation_service),
tool_generation_worker_service: ToolGenerationWorkerService = Depends(get_tool_generation_worker_service),
) -> ToolManagementService:
return ToolManagementService(
settings=settings,
@ -119,6 +164,8 @@ def get_tool_management_service(
version_repository=version_repository,
metadata_repository=metadata_repository,
artifact_repository=artifact_repository,
tool_generation_service=tool_generation_service,
tool_generation_worker_service=tool_generation_worker_service,
)

@ -1,4 +1,4 @@
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi import APIRouter, Depends, HTTPException, status
from admin_app.api.dependencies import (
get_settings,
@ -11,10 +11,13 @@ from admin_app.api.schemas import (
AdminToolDraftIntakeResponse,
AdminToolDraftListResponse,
AdminToolGenerationPipelineResponse,
AdminToolGovernanceDecisionRequest,
AdminToolGovernanceTransitionResponse,
AdminToolManagementActionResponse,
AdminToolOverviewResponse,
AdminToolPublicationListResponse,
AdminToolReviewDecisionRequest,
AdminToolReviewDetailResponse,
AdminToolReviewQueueResponse,
)
from admin_app.core import AdminSettings, AuthenticatedStaffPrincipal
@ -135,7 +138,7 @@ def panel_tool_pipeline_run(
),
):
try:
payload = service.run_generation_pipeline(
payload = service.run_generation_pipeline_in_worker(
version_id,
runner_staff_account_id=current_staff.id,
runner_name=current_staff.display_name,
@ -171,12 +174,32 @@ def panel_tool_review_queue(
)
@router.get(
    "/review-queue/{version_id}",
    response_model=AdminToolReviewDetailResponse,
)
def panel_tool_review_queue_detail(
    version_id: str,
    service: ToolManagementService = Depends(get_tool_management_service),
    _current_staff: AuthenticatedStaffPrincipal = Depends(
        require_panel_admin_permission(AdminPermission.REVIEW_TOOL_GENERATIONS)
    ),
):
    """Return the full review detail for one tool version; 404 when unknown."""
    try:
        payload = service.build_review_detail_payload(version_id)
    except LookupError as exc:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
    return _build_review_detail_response(payload)
@router.post(
"/review-queue/{version_id}/review",
response_model=AdminToolGovernanceTransitionResponse,
)
def panel_tool_review_queue_review(
version_id: str,
decision: AdminToolReviewDecisionRequest,
service: ToolManagementService = Depends(get_tool_management_service),
current_staff: AuthenticatedStaffPrincipal = Depends(
require_panel_admin_permission(AdminPermission.REVIEW_TOOL_GENERATIONS)
@ -188,6 +211,8 @@ def panel_tool_review_queue_review(
reviewer_staff_account_id=current_staff.id,
reviewer_name=current_staff.display_name,
reviewer_role=current_staff.role,
decision_notes=decision.decision_notes,
reviewed_generated_code=decision.reviewed_generated_code,
)
except LookupError as exc:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
@ -205,6 +230,7 @@ def panel_tool_review_queue_review(
)
def panel_tool_review_queue_approve(
version_id: str,
decision: AdminToolReviewDecisionRequest,
service: ToolManagementService = Depends(get_tool_management_service),
current_staff: AuthenticatedStaffPrincipal = Depends(
require_panel_admin_permission(AdminPermission.REVIEW_TOOL_GENERATIONS)
@ -216,6 +242,7 @@ def panel_tool_review_queue_approve(
approver_staff_account_id=current_staff.id,
approver_name=current_staff.display_name,
approver_role=current_staff.role,
decision_notes=decision.decision_notes,
)
except LookupError as exc:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
@ -274,6 +301,66 @@ def panel_tool_publications_publish(
return _build_governance_transition_response(payload)
@router.post(
    "/publications/{version_id}/deactivate",
    response_model=AdminToolGovernanceTransitionResponse,
)
def panel_tool_publications_deactivate(
    version_id: str,
    decision: AdminToolGovernanceDecisionRequest,
    service: ToolManagementService = Depends(get_tool_management_service),
    current_staff: AuthenticatedStaffPrincipal = Depends(
        require_panel_admin_permission(AdminPermission.PUBLISH_TOOLS)
    ),
):
    """Deactivate a published tool version, attributing the action to the staff member.

    Error mapping: LookupError -> 404, PermissionError -> 403,
    ValueError (invalid lifecycle transition) -> 409.
    """
    try:
        payload = service.deactivate_version(
            version_id,
            actor_staff_account_id=current_staff.id,
            actor_name=current_staff.display_name,
            actor_role=current_staff.role,
            decision_notes=decision.decision_notes,
        )
    except LookupError as exc:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
    except PermissionError as exc:
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=str(exc)) from exc
    except ValueError as exc:
        raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=str(exc)) from exc
    return _build_governance_transition_response(payload)
@router.post(
    "/publications/{version_id}/rollback",
    response_model=AdminToolGovernanceTransitionResponse,
)
def panel_tool_publications_rollback(
    version_id: str,
    decision: AdminToolGovernanceDecisionRequest,
    service: ToolManagementService = Depends(get_tool_management_service),
    current_staff: AuthenticatedStaffPrincipal = Depends(
        require_panel_admin_permission(AdminPermission.PUBLISH_TOOLS)
    ),
):
    """Roll back a published tool version, attributing the action to the staff member.

    Error mapping: LookupError -> 404, PermissionError -> 403,
    ValueError (invalid lifecycle transition) -> 409.
    """
    try:
        payload = service.rollback_version(
            version_id,
            actor_staff_account_id=current_staff.id,
            actor_name=current_staff.display_name,
            actor_role=current_staff.role,
            decision_notes=decision.decision_notes,
        )
    except LookupError as exc:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
    except PermissionError as exc:
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=str(exc)) from exc
    except ValueError as exc:
        raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=str(exc)) from exc
    return _build_governance_transition_response(payload)
def _build_pipeline_response(payload: dict) -> AdminToolGenerationPipelineResponse:
return AdminToolGenerationPipelineResponse(
@ -287,6 +374,7 @@ def _build_pipeline_response(payload: dict) -> AdminToolGenerationPipelineRespon
steps=payload["steps"],
queue_entry=payload["queue_entry"],
automated_validations=payload.get("automated_validations", []),
execution=payload.get("execution"),
next_steps=payload["next_steps"],
)
@ -305,6 +393,33 @@ def _build_governance_transition_response(payload: dict) -> AdminToolGovernanceT
)
def _build_review_detail_response(payload: dict) -> AdminToolReviewDetailResponse:
    """Map the service-layer review-detail payload onto the response schema.

    Required keys raise KeyError when missing; ``execution`` is optional and
    defaults to None.
    """
    return AdminToolReviewDetailResponse(
        service="orquestrador-admin",
        version_id=payload["version_id"],
        tool_name=payload["tool_name"],
        display_name=payload["display_name"],
        domain=payload["domain"],
        version_number=payload["version_number"],
        status=payload["status"],
        summary=payload["summary"],
        description=payload["description"],
        business_goal=payload["business_goal"],
        owner_name=payload["owner_name"],
        parameters=payload["parameters"],
        queue_entry=payload["queue_entry"],
        automated_validations=payload["automated_validations"],
        automated_validation_summary=payload["automated_validation_summary"],
        generated_module=payload["generated_module"],
        generated_callable=payload["generated_callable"],
        generated_source_code=payload["generated_source_code"],
        execution=payload.get("execution"),
        human_gate=payload["human_gate"],
        decision_history=payload["decision_history"],
        next_steps=payload["next_steps"],
    )
def _build_panel_actions(
settings: AdminSettings,
current_role: StaffRole | str | None = None,

@ -1,4 +1,4 @@
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi import APIRouter, Depends, HTTPException, status
from admin_app.api.dependencies import (
get_settings,
@ -11,10 +11,13 @@ from admin_app.api.schemas import (
AdminToolDraftIntakeResponse,
AdminToolDraftListResponse,
AdminToolGenerationPipelineResponse,
AdminToolGovernanceDecisionRequest,
AdminToolGovernanceTransitionResponse,
AdminToolManagementActionResponse,
AdminToolOverviewResponse,
AdminToolPublicationListResponse,
AdminToolReviewDecisionRequest,
AdminToolReviewDetailResponse,
AdminToolReviewQueueResponse,
)
from admin_app.core import AdminSettings, AuthenticatedStaffPrincipal
@ -135,7 +138,7 @@ def tool_pipeline_run(
),
):
try:
payload = service.run_generation_pipeline(
payload = service.run_generation_pipeline_in_worker(
version_id,
runner_staff_account_id=current_staff.id,
runner_name=current_staff.display_name,
@ -171,12 +174,32 @@ def tool_review_queue(
)
@router.get(
    "/review-queue/{version_id}",
    response_model=AdminToolReviewDetailResponse,
)
def tool_review_queue_detail(
    version_id: str,
    service: ToolManagementService = Depends(get_tool_management_service),
    _current_staff: AuthenticatedStaffPrincipal = Depends(
        require_admin_permission(AdminPermission.REVIEW_TOOL_GENERATIONS)
    ),
):
    """Return the full review detail for one tool version; 404 when unknown."""
    try:
        payload = service.build_review_detail_payload(version_id)
    except LookupError as exc:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
    return _build_review_detail_response(payload)
@router.post(
"/review-queue/{version_id}/review",
response_model=AdminToolGovernanceTransitionResponse,
)
def tool_review_queue_review(
version_id: str,
decision: AdminToolReviewDecisionRequest,
service: ToolManagementService = Depends(get_tool_management_service),
current_staff: AuthenticatedStaffPrincipal = Depends(
require_admin_permission(AdminPermission.REVIEW_TOOL_GENERATIONS)
@ -188,6 +211,8 @@ def tool_review_queue_review(
reviewer_staff_account_id=current_staff.id,
reviewer_name=current_staff.display_name,
reviewer_role=current_staff.role,
decision_notes=decision.decision_notes,
reviewed_generated_code=decision.reviewed_generated_code,
)
except LookupError as exc:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
@ -205,6 +230,7 @@ def tool_review_queue_review(
)
def tool_review_queue_approve(
version_id: str,
decision: AdminToolReviewDecisionRequest,
service: ToolManagementService = Depends(get_tool_management_service),
current_staff: AuthenticatedStaffPrincipal = Depends(
require_admin_permission(AdminPermission.REVIEW_TOOL_GENERATIONS)
@ -216,6 +242,7 @@ def tool_review_queue_approve(
approver_staff_account_id=current_staff.id,
approver_name=current_staff.display_name,
approver_role=current_staff.role,
decision_notes=decision.decision_notes,
)
except LookupError as exc:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
@ -274,6 +301,66 @@ def tool_publications_publish(
return _build_governance_transition_response(payload)
@router.post(
    "/publications/{version_id}/deactivate",
    response_model=AdminToolGovernanceTransitionResponse,
)
def tool_publications_deactivate(
    version_id: str,
    decision: AdminToolGovernanceDecisionRequest,
    service: ToolManagementService = Depends(get_tool_management_service),
    current_staff: AuthenticatedStaffPrincipal = Depends(
        require_admin_permission(AdminPermission.PUBLISH_TOOLS)
    ),
):
    """Deactivate a published tool version, attributing the action to the staff member.

    Error mapping: LookupError -> 404, PermissionError -> 403,
    ValueError (invalid lifecycle transition) -> 409.
    """
    try:
        payload = service.deactivate_version(
            version_id,
            actor_staff_account_id=current_staff.id,
            actor_name=current_staff.display_name,
            actor_role=current_staff.role,
            decision_notes=decision.decision_notes,
        )
    except LookupError as exc:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
    except PermissionError as exc:
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=str(exc)) from exc
    except ValueError as exc:
        raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=str(exc)) from exc
    return _build_governance_transition_response(payload)
@router.post(
    "/publications/{version_id}/rollback",
    response_model=AdminToolGovernanceTransitionResponse,
)
def tool_publications_rollback(
    version_id: str,
    decision: AdminToolGovernanceDecisionRequest,
    service: ToolManagementService = Depends(get_tool_management_service),
    current_staff: AuthenticatedStaffPrincipal = Depends(
        require_admin_permission(AdminPermission.PUBLISH_TOOLS)
    ),
):
    """Roll back a published tool version, attributing the action to the staff member.

    Error mapping: LookupError -> 404, PermissionError -> 403,
    ValueError (invalid lifecycle transition) -> 409.
    """
    try:
        payload = service.rollback_version(
            version_id,
            actor_staff_account_id=current_staff.id,
            actor_name=current_staff.display_name,
            actor_role=current_staff.role,
            decision_notes=decision.decision_notes,
        )
    except LookupError as exc:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
    except PermissionError as exc:
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=str(exc)) from exc
    except ValueError as exc:
        raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=str(exc)) from exc
    return _build_governance_transition_response(payload)
def _build_pipeline_response(payload: dict) -> AdminToolGenerationPipelineResponse:
return AdminToolGenerationPipelineResponse(
@ -287,6 +374,7 @@ def _build_pipeline_response(payload: dict) -> AdminToolGenerationPipelineRespon
steps=payload["steps"],
queue_entry=payload["queue_entry"],
automated_validations=payload.get("automated_validations", []),
execution=payload.get("execution"),
next_steps=payload["next_steps"],
)
@ -305,6 +393,33 @@ def _build_governance_transition_response(payload: dict) -> AdminToolGovernanceT
)
def _build_review_detail_response(payload: dict) -> AdminToolReviewDetailResponse:
    """Map the service-layer review-detail payload onto the response schema.

    Required keys raise KeyError when missing; ``execution`` is optional and
    defaults to None.
    """
    return AdminToolReviewDetailResponse(
        service="orquestrador-admin",
        version_id=payload["version_id"],
        tool_name=payload["tool_name"],
        display_name=payload["display_name"],
        domain=payload["domain"],
        version_number=payload["version_number"],
        status=payload["status"],
        summary=payload["summary"],
        description=payload["description"],
        business_goal=payload["business_goal"],
        owner_name=payload["owner_name"],
        parameters=payload["parameters"],
        queue_entry=payload["queue_entry"],
        automated_validations=payload["automated_validations"],
        automated_validation_summary=payload["automated_validation_summary"],
        generated_module=payload["generated_module"],
        generated_callable=payload["generated_callable"],
        generated_source_code=payload["generated_source_code"],
        execution=payload.get("execution"),
        human_gate=payload["human_gate"],
        decision_history=payload["decision_history"],
        next_steps=payload["next_steps"],
    )
def _build_actions(
settings: AdminSettings,
current_role: StaffRole | str | None = None,

@ -1,4 +1,4 @@
from datetime import datetime
from datetime import datetime
from pydantic import BaseModel, Field, field_validator
@ -776,6 +776,76 @@ class AdminToolReviewQueueResponse(BaseModel):
supported_statuses: list[ToolLifecycleStatus]
class AdminToolReviewDecisionRequest(BaseModel):
    """Reviewer decision payload for a tool-generation review.

    ``decision_notes`` is stripped *before* the length constraints run, so
    whitespace padding cannot be used to satisfy ``min_length``.
    """

    decision_notes: str = Field(min_length=12, max_length=2000)
    # Reviewer attests that the generated source code was actually read.
    reviewed_generated_code: bool = False

    @field_validator("decision_notes", mode="before")
    @classmethod
    def normalize_decision_notes(cls, value):
        # mode="before": strip first so min_length=12 is enforced on the
        # meaningful text rather than on surrounding whitespace. Non-string
        # values pass through for pydantic's own type validation to reject.
        return value.strip() if isinstance(value, str) else value
class AdminToolGovernanceDecisionRequest(BaseModel):
    """Decision payload for governance actions (publish/deactivate/rollback).

    ``decision_notes`` is stripped *before* the length constraints run, so
    whitespace padding cannot be used to satisfy ``min_length``.
    """

    decision_notes: str = Field(min_length=12, max_length=2000)

    @field_validator("decision_notes", mode="before")
    @classmethod
    def normalize_decision_notes(cls, value):
        # mode="before": strip first so min_length=12 is enforced on the
        # meaningful text rather than on surrounding whitespace. Non-string
        # values pass through for pydantic's own type validation to reject.
        return value.strip() if isinstance(value, str) else value
class AdminToolReviewHumanGateResponse(BaseModel):
    """Which human-gate actions are currently available for a tool version."""

    # Name of the gate the version currently sits at.
    current_gate: str
    review_action_available: bool
    approval_action_available: bool
    publication_action_available: bool
    deactivation_action_available: bool
    rollback_action_available: bool
    # Target version for a rollback, when one exists.
    rollback_target_version_id: str | None = None
    rollback_target_version_number: int | None = None
    # Whether the next action demands notes / an explicit code-review confirmation.
    requires_decision_notes: bool
    requires_code_review_confirmation: bool
class AdminToolReviewHistoryEntryResponse(BaseModel):
    """One entry in a tool version's governance decision history."""

    action_key: str
    label: str
    summary: str
    # Lifecycle statuses before/after the action, when recorded.
    previous_status: str | None = None
    current_status: str | None = None
    actor_name: str | None = None
    actor_role: str | None = None
    decision_notes: str | None = None
    # Only meaningful for review actions; None for other action kinds.
    reviewed_generated_code: bool | None = None
    recorded_at: datetime | None = None
class AdminToolReviewDetailResponse(BaseModel):
    """Full detail view of one tool version in the human review queue."""

    service: str
    version_id: str
    tool_name: str
    display_name: str
    domain: str
    version_number: int = Field(ge=1)
    status: ToolLifecycleStatus
    summary: str
    description: str
    business_goal: str
    owner_name: str | None = None
    # Quoted annotations are forward references to models declared later in the module.
    parameters: list["AdminToolPublicationParameterResponse"] = Field(default_factory=list)
    queue_entry: AdminToolReviewQueueEntryResponse
    automated_validations: list["AdminToolAutomatedValidationResponse"] = Field(default_factory=list)
    automated_validation_summary: str | None = None
    generated_module: str
    generated_callable: str
    generated_source_code: str
    execution: AdminToolPipelineExecutionResponse | None = None
    human_gate: AdminToolReviewHumanGateResponse
    decision_history: list[AdminToolReviewHistoryEntryResponse] = Field(default_factory=list)
    next_steps: list[str] = Field(default_factory=list)
class AdminToolPublicationParameterResponse(BaseModel):
name: str
parameter_type: ToolParameterType
@ -791,6 +861,7 @@ class AdminToolPublicationSummaryResponse(BaseModel):
domain: str
version: int
status: ToolLifecycleStatus
version_id: str | None = None
parameter_count: int
parameters: list[AdminToolPublicationParameterResponse] = Field(default_factory=list)
author_name: str | None = None
@ -798,6 +869,10 @@ class AdminToolPublicationSummaryResponse(BaseModel):
implementation_callable: str
published_by: str | None = None
published_at: datetime | None = None
deactivation_action_available: bool = False
rollback_action_available: bool = False
rollback_target_version_id: str | None = None
rollback_target_version_number: int | None = None
class AdminToolPublicationListResponse(BaseModel):
@ -834,6 +909,21 @@ class AdminToolPipelineStepResponse(BaseModel):
description: str
class AdminToolPipelineExecutionResponse(BaseModel):
    """Telemetry describing how a generation pipeline run was dispatched/executed."""

    mode: str
    target: str
    dispatch_state: str | None = None
    worker_max_workers: int | None = None
    worker_pending_jobs: int | None = None
    queued_jobs_before_submit: int | None = None
    # Timestamps are carried as pre-serialized strings, not datetime objects.
    submitted_at: str | None = None
    started_at: str | None = None
    completed_at: str | None = None
    elapsed_ms: float | None = None
    worker_thread_name: str | None = None
    poll_after_ms: int | None = None
    last_error: str | None = None
class AdminToolGenerationPipelineResponse(BaseModel):
service: str
message: str
@ -845,6 +935,7 @@ class AdminToolGenerationPipelineResponse(BaseModel):
steps: list[AdminToolPipelineStepResponse]
queue_entry: AdminToolReviewQueueEntryResponse
automated_validations: list[AdminToolAutomatedValidationResponse] = Field(default_factory=list)
execution: AdminToolPipelineExecutionResponse | None = None
next_steps: list[str]
@ -871,6 +962,7 @@ class AdminToolDraftIntakeRequest(BaseModel):
domain: str = Field(min_length=3, max_length=40)
description: str = Field(min_length=16, max_length=280)
business_goal: str = Field(min_length=12, max_length=280)
generation_model: str | None = Field(default=None, max_length=120)
parameters: list[AdminToolDraftIntakeParameterRequest] = Field(default_factory=list, max_length=10)
@field_validator("tool_name")
@ -888,6 +980,14 @@ class AdminToolDraftIntakeRequest(BaseModel):
def normalize_domain(cls, value: str) -> str:
return value.strip().lower()
@field_validator("generation_model", mode="before")
@classmethod
def normalize_generation_model(cls, value: str | None) -> str | None:
if value is None:
return None
normalized = value.strip()
return normalized or None
class AdminToolDraftSubmissionPolicyResponse(BaseModel):
mode: str
@ -920,6 +1020,7 @@ class AdminToolDraftIntakePreviewResponse(BaseModel):
version_count: int = Field(ge=1)
parameter_count: int
required_parameter_count: int
generation_model: str | None = None
requires_director_approval: bool
owner_name: str | None = None
parameters: list[AdminToolDraftIntakePreviewParameterResponse]

@ -43,6 +43,16 @@ class AdminSettings(BaseSettings):
admin_bootstrap_password: str | None = None
admin_bootstrap_role: str = "diretor"
# ---- Runtime de geração de tools (separado do runtime de atendimento) ----
# Mapeado ao tool_generation_runtime_profile do contrato shared/contracts/model_runtime_separation.py.
# Nunca compartilhar estes valores com o runtime de atendimento do product.
admin_tool_generation_model: str = "gemini-3-pro-preview"
admin_tool_generation_fallback_model: str = "gemini-2.5-pro"
admin_tool_generation_timeout_seconds: int = 120
admin_tool_generation_max_output_tokens: int = 8192
admin_tool_generation_temperature: float = 0.2
admin_tool_generation_worker_max_workers: int = 1
@field_validator("admin_debug", mode="before")
@classmethod
def parse_debug_aliases(cls, value):

@ -22,6 +22,18 @@ def _ensure_admin_schema_evolution() -> None:
statements.append("ALTER TABLE tool_drafts ADD COLUMN current_version_number INT NOT NULL DEFAULT 1")
if "version_count" not in tool_draft_columns:
statements.append("ALTER TABLE tool_drafts ADD COLUMN version_count INT NOT NULL DEFAULT 1")
if "generation_model" not in tool_draft_columns:
statements.append("ALTER TABLE tool_drafts ADD COLUMN generation_model VARCHAR(120)")
if statements:
with admin_engine.begin() as connection:
for statement in statements:
connection.execute(text(statement))
if "tool_versions" in table_names:
tool_version_columns = {column["name"] for column in inspector.get_columns("tool_versions")}
statements = []
if "generation_model" not in tool_version_columns:
statements.append("ALTER TABLE tool_versions ADD COLUMN generation_model VARCHAR(120)")
if statements:
with admin_engine.begin() as connection:
for statement in statements:

@ -21,6 +21,8 @@ class ToolArtifactKind(str, Enum):
DIRECTOR_REVIEW = "director_review"
DIRECTOR_APPROVAL = "director_approval"
PUBLICATION_RELEASE = "publication_release"
PUBLICATION_DEACTIVATION = "publication_deactivation"
PUBLICATION_ROLLBACK = "publication_rollback"
class ToolArtifactStorageKind(str, Enum):

@ -55,6 +55,7 @@ class ToolDraft(AdminTimestampedModel):
nullable=False,
default=True,
)
generation_model: Mapped[str | None] = mapped_column(String(120), nullable=True)
owner_staff_account_id: Mapped[int] = mapped_column(
Integer,
ForeignKey("staff_accounts.id"),

@ -44,6 +44,7 @@ class ToolVersion(AdminTimestampedModel):
nullable=False,
default=True,
)
generation_model: Mapped[str | None] = mapped_column(String(120), nullable=True)
owner_staff_account_id: Mapped[int] = mapped_column(
Integer,
ForeignKey("staff_accounts.id"),

@ -42,6 +42,7 @@ class ToolDraftRepository(BaseRepository):
version_count: int,
owner_staff_account_id: int,
owner_display_name: str,
generation_model: str | None = None,
requires_director_approval: bool = True,
commit: bool = True,
) -> ToolDraft:
@ -58,6 +59,7 @@ class ToolDraftRepository(BaseRepository):
required_parameter_count=required_parameter_count,
current_version_number=current_version_number,
version_count=version_count,
generation_model=generation_model,
requires_director_approval=requires_director_approval,
owner_staff_account_id=owner_staff_account_id,
owner_display_name=owner_display_name,
@ -85,6 +87,7 @@ class ToolDraftRepository(BaseRepository):
version_count: int,
owner_staff_account_id: int,
owner_display_name: str,
generation_model: str | None = None,
requires_director_approval: bool = True,
commit: bool = True,
) -> ToolDraft:
@ -98,6 +101,7 @@ class ToolDraftRepository(BaseRepository):
draft.required_parameter_count = required_parameter_count
draft.current_version_number = current_version_number
draft.version_count = version_count
draft.generation_model = generation_model
draft.requires_director_approval = requires_director_approval
draft.owner_staff_account_id = owner_staff_account_id
draft.owner_display_name = owner_display_name

@ -54,6 +54,7 @@ class ToolVersionRepository(BaseRepository):
required_parameter_count: int,
owner_staff_account_id: int,
owner_display_name: str,
generation_model: str | None = None,
status: ToolLifecycleStatus = ToolLifecycleStatus.DRAFT,
requires_director_approval: bool = True,
commit: bool = True,
@ -69,6 +70,7 @@ class ToolVersionRepository(BaseRepository):
business_goal=business_goal,
parameters_json=parameters_json,
required_parameter_count=required_parameter_count,
generation_model=generation_model,
requires_director_approval=requires_director_approval,
owner_staff_account_id=owner_staff_account_id,
owner_display_name=owner_display_name,

@ -7,6 +7,8 @@ from admin_app.services.auth_service import AuthService
from admin_app.services.collaborator_management_service import CollaboratorManagementService
from admin_app.services.report_service import ReportService
from admin_app.services.system_service import SystemService
from admin_app.services.tool_generation_service import ToolGenerationService
from admin_app.services.tool_generation_worker_service import ToolGenerationWorkerService
from admin_app.services.tool_management_service import ToolManagementService
__all__ = [
@ -17,5 +19,7 @@ __all__ = [
"CollaboratorManagementService",
"ReportService",
"SystemService",
"ToolGenerationService",
"ToolGenerationWorkerService",
"ToolManagementService",
]

@ -0,0 +1,444 @@
"""Serviço isolado de geração de tools via LLM para o runtime administrativo.
Este módulo é a única camada do admin_app que conversa com o Vertex AI para fins
de geração de código. Ele é completamente separado do LLMService do product
(app.services.ai.llm_service) e usa configurações próprias do AdminSettings.
Separação arquitetural garantida por:
- shared.contracts.model_runtime_separation.ModelRuntimeTarget.TOOL_GENERATION
- config keys: admin_tool_generation_model / admin_tool_generation_fallback_model
- Nenhuma importação de app.* é permitida neste módulo.
"""
from __future__ import annotations
import logging
import re
from time import perf_counter
from typing import Any
import vertexai
from google.api_core.exceptions import GoogleAPIError, NotFound
from vertexai.generative_models import GenerationConfig, GenerativeModel
from admin_app.core.settings import AdminSettings
logger = logging.getLogger(__name__)

# ---- Generation constants ----------------------------------------------------

# Extracts the first ```python fenced block from the model response.
_PYTHON_BLOCK_RE = re.compile(
    r"```python\s*\n(.*?)```",
    re.DOTALL | re.IGNORECASE,
)

# Patterns the generated code must not contain.
# Applied before the automated validations that already exist in ToolManagementService.
_DANGEROUS_PATTERNS: tuple[tuple[str, str], ...] = (
    (r"\bexec\s*\(", "uso de exec() proibido em tools geradas"),
    (r"\beval\s*\(", "uso de eval() proibido em tools geradas"),
    (r"\b__import__\s*\(", "uso de __import__() proibido em tools geradas"),
    (r"os\.system\s*\(", "chamada a os.system() proibida em tools geradas"),
    (r"os\.popen\s*\(", "chamada a os.popen() proibida em tools geradas"),
    (r"\bsubprocess\b", "uso de subprocess proibido em tools geradas"),
    (r"from\s+app\.", "importação de app.* proibida em tools geradas"),
    (r"from\s+admin_app\.", "importação de admin_app.* proibida em tools geradas"),
    (r"import\s+app\b", "importação direta de app proibida em tools geradas"),
    (r"import\s+admin_app\b", "importação direta de admin_app proibida em tools geradas"),
    (r"\bopen\s*\(", "acesso a sistema de arquivos via open() proibido em tools geradas"),
    (r"__builtins__", "acesso a __builtins__ proibido em tools geradas"),
)

# Maps parameter type names to readable Python annotations.
_TYPE_ANNOTATION_MAP: dict[str, str] = {
    "string": "str",
    "integer": "int",
    "number": "float",
    "boolean": "bool",
    "object": "dict",
    "array": "list",
}

# Cache of instantiated Vertex AI models (keyed by model name).
_MODEL_CACHE: dict[str, GenerativeModel] = {}

# SDK initialization flag (avoids re-initializing per service instance).
_VERTEX_INITIALIZED: bool = False
class ToolGenerationService:
"""Gera implementações de tools via Vertex AI no contexto administrativo.
Responsabilidades:
- Construir prompt estruturado a partir dos metadados da tool
- Chamar o modelo LLM de geração (separado do modelo de atendimento)
- Extrair o bloco de código Python da resposta
- Aplicar linting de segurança antes de devolver o código
- Retornar resultado estruturado para o ToolManagementService
Não faz:
- Não persiste artefatos (responsabilidade do ToolManagementService)
- Não valida contrato nem assinatura (responsabilidade do ToolManagementService)
- Não executa o código gerado
"""
    def __init__(self, settings: AdminSettings) -> None:
        """Store the admin settings and ensure the Vertex AI SDK is initialized."""
        self.settings = settings
        self._ensure_vertex_initialized()
    def _ensure_vertex_initialized(self) -> None:
        """Initialize the Vertex AI SDK once per process (best effort).

        On failure only a warning is logged and the module flag stays unset,
        so the next service instantiation retries initialization.
        """
        global _VERTEX_INITIALIZED
        if _VERTEX_INITIALIZED:
            return
        # Reuses the Google project credentials already configured in the admin
        # settings (read from .env, identical to the product). The isolation is
        # in the model/temperature parameters — not in the GCP account.
        try:
            import os

            project_id = os.environ.get("GOOGLE_PROJECT_ID", "")
            location = os.environ.get("GOOGLE_LOCATION", "us-central1")
            vertexai.init(project=project_id, location=location)
            _VERTEX_INITIALIZED = True
            logger.info(
                "tool_generation_service_event=vertex_initialized project=%s location=%s",
                project_id,
                location,
            )
        except Exception as exc:
            logger.warning(
                "tool_generation_service_event=vertex_init_warning error=%s",
                exc,
            )
def _get_model(self, model_name: str) -> GenerativeModel:
model = _MODEL_CACHE.get(model_name)
if model is None:
model = GenerativeModel(model_name)
_MODEL_CACHE[model_name] = model
return model
def _build_model_sequence(self, preferred_model: str | None) -> list[str]:
"""Constrói a sequência de modelos a tentar, respeitando o preferred e o fallback."""
sequence: list[str] = []
candidates = [
preferred_model,
self.settings.admin_tool_generation_model,
self.settings.admin_tool_generation_fallback_model,
]
for candidate in candidates:
normalized = str(candidate or "").strip()
if normalized and normalized not in sequence:
sequence.append(normalized)
return sequence
def _build_generation_prompt(
    self,
    *,
    tool_name: str,
    display_name: str,
    domain: str,
    description: str,
    business_goal: str,
    parameters: list[dict],
) -> str:
    """Build the structured generation prompt sent to the model.

    The prompt describes the expected contract, the import restrictions,
    the parameters and the operational goal of the tool. The prompt text
    itself is intentionally in Portuguese (the product's working language).
    """
    signature_parts: list[str] = []
    parameter_lines: list[str] = []
    for param in parameters:
        # Parameter names are normalized to lowercase; nameless entries are skipped.
        name = str(param.get("name") or "").strip().lower()
        if not name:
            continue
        param_type = str(param.get("parameter_type") or "string").strip().lower()
        description_param = str(param.get("description") or "").strip()
        # Parameters default to required when the flag is absent.
        required = bool(param.get("required", True))
        # Unknown parameter types fall back to the "str" annotation.
        annotation = _TYPE_ANNOTATION_MAP.get(param_type, "str")
        if required:
            signature_parts.append(f"{name}: {annotation}")
        else:
            signature_parts.append(f"{name}: {annotation} | None = None")
        required_label = "obrigatório" if required else "opcional"
        parameter_lines.append(
            f"  - {name} ({annotation}, {required_label}): {description_param}"
        )
    signature = ", ".join(signature_parts)
    # Keyword-only signature when there are parameters; bare run() otherwise.
    if signature:
        full_signature = f"async def run(*, {signature}) -> dict:"
    else:
        full_signature = "async def run() -> dict:"
    parameters_block = (
        "\n".join(parameter_lines)
        if parameter_lines
        else " (nenhum parâmetro — a tool não recebe entrada contextual)"
    )
    # Domain-specific business context injected into the prompt; unknown
    # domains fall back to a generic customer-service description.
    domain_context_map = {
        "vendas": (
            "O bot atua em um sistema de atendimento para concessionária automotiva. "
            "A tool opera no domínio de vendas: estoque de veículos, negociações, pedidos e cancelamentos."
        ),
        "revisao": (
            "O bot atua em um sistema de atendimento de oficina automotiva. "
            "A tool opera no domínio de revisão: agendamentos, remarcações, listagem de serviços."
        ),
        "locacao": (
            "O bot atua em um sistema de atendimento de locadora de veículos. "
            "A tool opera no domínio de locação: frota, contratos, pagamentos e devoluções."
        ),
        "orquestracao": (
            "O bot atua em um sistema de orquestração conversacional. "
            "A tool opera no domínio de orquestração: controla fluxo, contexto e estado da conversa."
        ),
    }
    domain_context = domain_context_map.get(
        str(domain or "").strip().lower(),
        "O bot atua em um sistema de atendimento automatizado.",
    )
    return (
        "Você é um especialista em Python que gera implementações realistas de tools "
        "para um bot de atendimento.\n\n"
        f"CONTEXTO DO DOMÍNIO:\n{domain_context}\n\n"
        "CONTRATO OBRIGATÓRIO:\n"
        "- A função deve ser assíncrona: async def run(...)\n"
        "- Todos os parâmetros devem ser keyword-only (após *)\n"
        "- O tipo de retorno deve ser dict (JSON-serializável)\n"
        "- O módulo pode importar apenas stdlib (datetime, json, re, math, uuid, etc.)\n"
        "- Proibido importar: app.*, admin_app.*, subprocess, os.system, os.popen\n"
        "- Proibido usar: exec(), eval(), __import__(), open()\n\n"
        "TOOL A IMPLEMENTAR:\n"
        f"- Nome técnico: {tool_name}\n"
        f"- Nome de exibição: {display_name}\n"
        f"- Domínio: {domain}\n"
        f"- Descrição funcional: {description}\n"
        f"- Objetivo de negócio: {business_goal}\n\n"
        f"PARÂMETROS DA TOOL:\n{parameters_block}\n\n"
        f"ASSINATURA ESPERADA:\n{full_signature}\n\n"
        "INSTRUÇÕES DE GERAÇÃO:\n"
        "- Gere uma implementação realista que simule o comportamento esperado da tool.\n"
        "- O retorno deve incluir os campos relevantes ao domínio (não apenas echo dos argumentos).\n"
        "- Use dados fictícios mas verossímeis para simular a resposta operacional.\n"
        "- Nenhuma explicação ou comentário fora do código. Retorne apenas o bloco Python.\n"
        "- O módulo deve começar com um docstring descritivo.\n"
        "- Envolva o código em ```python ... ```.\n"
    )
def _extract_python_block(self, raw_response: str) -> str | None:
    """Extract the first ```python ... ``` block from the model response.

    Returns None when no code block is found and the raw text does not
    look like bare Python either.
    """
    normalized = str(raw_response or "").strip()
    match = _PYTHON_BLOCK_RE.search(normalized)
    if match:
        return match.group(1).strip()
    # Fallback: no code fence, but the content looks like a bare Python
    # module (starts with the run coroutine or a module docstring).
    if normalized.startswith("async def run") or normalized.startswith('"""'):
        return normalized
    return None
def _apply_safety_linting(self, source_code: str) -> list[str]:
    """Scan generated code for dangerous patterns before formal validation.

    Returns the list of issues found; an empty list means linting passed.
    """
    return [
        f"linting: {description}."
        for pattern, description in _DANGEROUS_PATTERNS
        if re.search(pattern, source_code, re.MULTILINE)
    ]
async def generate_tool_source(
    self,
    *,
    tool_name: str,
    display_name: str,
    domain: str,
    description: str,
    business_goal: str,
    parameters: list[dict],
    preferred_model: str | None = None,
) -> dict[str, Any]:
    """Generate the tool's Python source code from the draft metadata.

    Tries each model in the preferred/primary/fallback sequence until one
    returns usable text. Bug fix: an empty (text-less) response from a model
    now also falls through to the next model in the sequence — previously it
    stopped the loop and the fallback model was never tried.

    Returns a dict with:
    - passed (bool): True if code was generated and passed linting
    - generated_source_code (str | None): generated Python code
    - generation_model_used (str | None): model that produced the code
    - prompt_rendered (str): prompt sent to the model (for auditing)
    - issues (list[str]): problems found (generation or linting)
    - elapsed_ms (float): total generation time in milliseconds
    """
    import asyncio

    prompt = self._build_generation_prompt(
        tool_name=tool_name,
        display_name=display_name,
        domain=domain,
        description=description,
        business_goal=business_goal,
        parameters=parameters,
    )
    model_sequence = self._build_model_sequence(preferred_model)
    generation_config = GenerationConfig(
        temperature=self.settings.admin_tool_generation_temperature,
        max_output_tokens=self.settings.admin_tool_generation_max_output_tokens,
    )
    raw_response: str | None = None
    generation_model_used: str | None = None
    last_error: Exception | None = None
    started_at = perf_counter()
    for model_name in model_sequence:
        try:
            model = self._get_model(model_name)
            # The Vertex SDK call is synchronous; run it in a worker thread
            # and bound it with the configured timeout.
            response = await asyncio.wait_for(
                asyncio.to_thread(
                    model.generate_content,
                    prompt,
                    generation_config=generation_config,
                ),
                timeout=float(self.settings.admin_tool_generation_timeout_seconds),
            )
            candidate = (
                response.candidates[0]
                if getattr(response, "candidates", None)
                else None
            )
            content = getattr(candidate, "content", None)
            parts = list(getattr(content, "parts", None) or [])
            text_parts = [
                getattr(part, "text", None)
                for part in parts
                if isinstance(getattr(part, "text", None), str)
            ]
            extracted = "\n".join(
                t for t in text_parts if t and t.strip()
            ).strip() or None
            if extracted is None:
                # Fallback to the root .text attribute of the response.
                try:
                    extracted = str(response.text or "").strip() or None
                except (AttributeError, ValueError):
                    extracted = None
            if extracted is None:
                # Fix: empty responses used to break the loop here without
                # trying the fallback model; record the error and continue.
                last_error = RuntimeError(
                    f"modelo '{model_name}' retornou resposta vazia."
                )
                logger.warning(
                    "tool_generation_service_event=empty_response model=%s",
                    model_name,
                )
                continue
            raw_response = extracted
            generation_model_used = model_name
            break
        except asyncio.TimeoutError:
            last_error = TimeoutError(
                f"modelo '{model_name}' excedeu o timeout de "
                f"{self.settings.admin_tool_generation_timeout_seconds}s para geração de tools."
            )
            logger.warning(
                "tool_generation_service_event=timeout model=%s timeout_seconds=%s",
                model_name,
                self.settings.admin_tool_generation_timeout_seconds,
            )
            continue
        except NotFound as exc:
            # Drop the stale handle so a retry re-creates the model object.
            last_error = exc
            _MODEL_CACHE.pop(model_name, None)
            logger.warning(
                "tool_generation_service_event=model_not_found model=%s error=%s",
                model_name,
                exc,
            )
            continue
        except GoogleAPIError as exc:
            last_error = exc
            logger.warning(
                "tool_generation_service_event=api_error model=%s error=%s",
                model_name,
                exc,
            )
            continue
        except Exception as exc:
            # Catch-all so one misbehaving model never prevents the fallback.
            last_error = exc
            logger.warning(
                "tool_generation_service_event=unexpected_error model=%s error=%s class=%s",
                model_name,
                exc,
                exc.__class__.__name__,
            )
            continue
    elapsed_ms = round((perf_counter() - started_at) * 1000, 2)
    if raw_response is None or generation_model_used is None:
        error_detail = str(last_error) if last_error else "nenhum modelo disponivel respondeu"
        logger.error(
            "tool_generation_service_event=generation_failed tool_name=%s elapsed_ms=%s error=%s",
            tool_name,
            elapsed_ms,
            error_detail,
        )
        return {
            "passed": False,
            "generated_source_code": None,
            "generation_model_used": None,
            "prompt_rendered": prompt,
            "issues": [f"falha na geração via LLM: {error_detail}"],
            "elapsed_ms": elapsed_ms,
        }
    generated_source_code = self._extract_python_block(raw_response)
    if generated_source_code is None:
        logger.warning(
            "tool_generation_service_event=no_code_block tool_name=%s model=%s elapsed_ms=%s",
            tool_name,
            generation_model_used,
            elapsed_ms,
        )
        return {
            "passed": False,
            "generated_source_code": None,
            "generation_model_used": generation_model_used,
            "prompt_rendered": prompt,
            "issues": ["o modelo não retornou um bloco de código Python identificável."],
            "elapsed_ms": elapsed_ms,
        }
    linting_issues = self._apply_safety_linting(generated_source_code)
    if linting_issues:
        logger.warning(
            "tool_generation_service_event=linting_failed tool_name=%s model=%s issues=%s elapsed_ms=%s",
            tool_name,
            generation_model_used,
            linting_issues,
            elapsed_ms,
        )
        return {
            "passed": False,
            "generated_source_code": generated_source_code,
            "generation_model_used": generation_model_used,
            "prompt_rendered": prompt,
            "issues": linting_issues,
            "elapsed_ms": elapsed_ms,
        }
    logger.info(
        "tool_generation_service_event=generation_succeeded tool_name=%s model=%s elapsed_ms=%s",
        tool_name,
        generation_model_used,
        elapsed_ms,
    )
    return {
        "passed": True,
        "generated_source_code": generated_source_code,
        "generation_model_used": generation_model_used,
        "prompt_rendered": prompt,
        "issues": [],
        "elapsed_ms": elapsed_ms,
    }

@ -0,0 +1,266 @@
from __future__ import annotations
import threading
from concurrent.futures import ThreadPoolExecutor
from datetime import UTC, datetime
from time import perf_counter
from typing import Any
from admin_app.core.settings import AdminSettings
from admin_app.db.database import AdminSessionLocal
from admin_app.repositories import (
ToolArtifactRepository,
ToolDraftRepository,
ToolMetadataRepository,
ToolVersionRepository,
)
from admin_app.services.tool_generation_service import ToolGenerationService
class ToolGenerationWorkerService:
    """Runs the generation pipeline on a dedicated worker of the admin runtime.

    The worker opens its own administrative session and creates an isolated
    ToolManagementService instance inside the dedicated thread. That way the
    generation and validations share neither the web request's SQLAlchemy
    session nor the default thread pool used by FastAPI's sync routes.
    """

    # Thread name prefix so worker threads are identifiable in diagnostics.
    _THREAD_NAME_PREFIX = "admin-tool-generation-worker"
    # Suggested client poll interval while a job is queued/running.
    _DEFAULT_POLL_AFTER_MS = 1200

    def __init__(self, settings: AdminSettings) -> None:
        """Create the dedicated executor sized from the admin settings."""
        self.settings = settings
        # At least one worker, even if the setting is zero or negative.
        self.max_workers = max(1, int(settings.admin_tool_generation_worker_max_workers))
        self._executor = ThreadPoolExecutor(
            max_workers=self.max_workers,
            thread_name_prefix=self._THREAD_NAME_PREFIX,
        )
        # Guards _pending_jobs and _jobs; every mutation happens under it.
        self._lock = threading.Lock()
        self._pending_jobs = 0
        # version_id -> job bookkeeping dict (state, timestamps, result).
        # NOTE(review): entries are never pruned — confirm growth is acceptable.
        self._jobs: dict[str, dict[str, Any]] = {}

    def shutdown(self, *, wait: bool = False) -> None:
        """Shut down the executor, cancelling any not-yet-started jobs."""
        self._executor.shutdown(wait=wait, cancel_futures=True)

    def execute_generation_pipeline(
        self,
        *,
        version_id: str,
        runner_staff_account_id: int,
        runner_name: str,
        runner_role,
    ) -> dict[str, Any]:
        """Run the pipeline on the worker and block the caller for the result.

        Returns the pipeline payload enriched with an "execution" telemetry
        block (queue depth, timings, worker thread name).
        """
        submitted_at = datetime.now(UTC).isoformat()
        with self._lock:
            self._pending_jobs += 1
            queued_jobs_before_submit = max(self._pending_jobs - 1, 0)
        started_at = perf_counter()
        future = self._executor.submit(
            self._run_generation_pipeline_job,
            version_id,
            runner_staff_account_id,
            runner_name,
            runner_role,
        )
        try:
            # Blocks until the worker thread finishes (or raises).
            payload = future.result()
        finally:
            with self._lock:
                self._pending_jobs = max(self._pending_jobs - 1, 0)
                pending_jobs_after_completion = self._pending_jobs
        execution = {
            "mode": "dedicated_generation_worker",
            "target": "admin_tool_generation_worker",
            "dispatch_state": "completed",
            "worker_max_workers": self.max_workers,
            "worker_pending_jobs": pending_jobs_after_completion,
            "queued_jobs_before_submit": queued_jobs_before_submit,
            "submitted_at": submitted_at,
            # Synchronous mode: started_at mirrors submitted_at by design.
            "started_at": submitted_at,
            "completed_at": datetime.now(UTC).isoformat(),
            "elapsed_ms": round((perf_counter() - started_at) * 1000, 2),
            # Internal marker is popped so it never leaks into the payload body.
            "worker_thread_name": str(payload.pop("_worker_thread_name", "")) or None,
            "poll_after_ms": None,
            "last_error": None,
        }
        enriched_payload = dict(payload)
        enriched_payload["execution"] = execution
        return enriched_payload

    def dispatch_generation_pipeline(
        self,
        *,
        version_id: str,
        runner_staff_account_id: int,
        runner_name: str,
        runner_role,
    ) -> dict[str, Any]:
        """Queue the pipeline asynchronously and return a dispatch snapshot.

        Idempotent while a job for the same version is queued/running: the
        existing job's snapshot is returned instead of submitting a duplicate.

        Raises:
            ValueError: if version_id is blank after normalization.
        """
        normalized_version_id = str(version_id or "").strip().lower()
        if not normalized_version_id:
            raise ValueError("Versao administrativa invalida para o worker de geracao.")
        with self._lock:
            existing_job = self._jobs.get(normalized_version_id)
            # De-duplicate in-flight dispatches for the same version.
            if existing_job is not None and existing_job.get("dispatch_state") in {"queued", "running"}:
                return self._build_dispatch_snapshot_locked(existing_job)
            self._pending_jobs += 1
            queued_jobs_before_submit = max(self._pending_jobs - 1, 0)
            job = {
                "version_id": normalized_version_id,
                "dispatch_state": "queued",
                "queued_jobs_before_submit": queued_jobs_before_submit,
                "submitted_at": datetime.now(UTC).isoformat(),
                "started_at": None,
                "completed_at": None,
                "elapsed_ms": None,
                "worker_thread_name": None,
                "last_error": None,
                "result_payload": None,
            }
            self._jobs[normalized_version_id] = job
            # Submitted under the lock so the job record exists before the
            # worker thread can observe or mutate it.
            self._executor.submit(
                self._run_generation_pipeline_job_async,
                normalized_version_id,
                runner_staff_account_id,
                runner_name,
                runner_role,
            )
            return self._build_dispatch_snapshot_locked(job)

    def get_generation_pipeline_dispatch(self, version_id: str) -> dict[str, Any] | None:
        """Return the dispatch snapshot for a version, or None if unknown."""
        normalized_version_id = str(version_id or "").strip().lower()
        if not normalized_version_id:
            return None
        with self._lock:
            job = self._jobs.get(normalized_version_id)
            if job is None:
                return None
            return self._build_dispatch_snapshot_locked(job)

    def _run_generation_pipeline_job_async(
        self,
        version_id: str,
        runner_staff_account_id: int,
        runner_name: str,
        runner_role,
    ) -> None:
        """Worker-thread entry point: run the job and record its outcome."""
        self._mark_job_running(version_id)
        try:
            payload = self._run_generation_pipeline_job(
                version_id,
                runner_staff_account_id,
                runner_name,
                runner_role,
            )
        except Exception as exc:
            # Failures are captured into the job record for later polling
            # instead of being raised into the executor.
            self._mark_job_failed(version_id, exc)
            return
        self._mark_job_completed(version_id, payload)

    def _mark_job_running(self, version_id: str) -> None:
        """Transition the job to 'running' and record thread/start metadata."""
        with self._lock:
            job = self._jobs.get(version_id)
            if job is None:
                return
            job["dispatch_state"] = "running"
            job["started_at"] = datetime.now(UTC).isoformat()
            job["worker_thread_name"] = threading.current_thread().name

    def _mark_job_completed(self, version_id: str, payload: dict[str, Any]) -> None:
        """Record a successful result and release one pending-job slot."""
        with self._lock:
            job = self._jobs.get(version_id)
            # NOTE(review): if the job record is missing, _pending_jobs is not
            # decremented — confirm records can never disappear mid-flight.
            if job is None:
                return
            completed_at = datetime.now(UTC).isoformat()
            # Prefer actual start time; fall back to submission time.
            started_reference = self._parse_job_timestamp(job.get("started_at")) or self._parse_job_timestamp(job.get("submitted_at"))
            elapsed_ms = None
            if started_reference is not None:
                elapsed_ms = round((datetime.now(UTC) - started_reference).total_seconds() * 1000, 2)
            job["dispatch_state"] = "completed"
            job["completed_at"] = completed_at
            job["elapsed_ms"] = elapsed_ms
            # Defensive copy so later caller mutations cannot corrupt the record.
            job["result_payload"] = dict(payload)
            job["last_error"] = None
            self._pending_jobs = max(self._pending_jobs - 1, 0)

    def _mark_job_failed(self, version_id: str, exc: Exception) -> None:
        """Record a failure (type + message) and release one pending-job slot."""
        with self._lock:
            job = self._jobs.get(version_id)
            if job is None:
                return
            completed_at = datetime.now(UTC).isoformat()
            started_reference = self._parse_job_timestamp(job.get("started_at")) or self._parse_job_timestamp(job.get("submitted_at"))
            elapsed_ms = None
            if started_reference is not None:
                elapsed_ms = round((datetime.now(UTC) - started_reference).total_seconds() * 1000, 2)
            job["dispatch_state"] = "failed"
            job["completed_at"] = completed_at
            job["elapsed_ms"] = elapsed_ms
            job["last_error"] = f"{type(exc).__name__}: {exc}"
            self._pending_jobs = max(self._pending_jobs - 1, 0)

    def _build_dispatch_snapshot_locked(self, job: dict[str, Any]) -> dict[str, Any]:
        """Build a caller-safe snapshot of a job record.

        Must be called with self._lock held (reads _pending_jobs and the
        mutable job dict).
        """
        dispatch_state = str(job.get("dispatch_state") or "queued")
        snapshot = {
            "mode": "dedicated_generation_worker_async",
            "target": "admin_tool_generation_worker",
            "dispatch_state": dispatch_state,
            "worker_max_workers": self.max_workers,
            "worker_pending_jobs": self._pending_jobs,
            "queued_jobs_before_submit": job.get("queued_jobs_before_submit", 0),
            "submitted_at": job.get("submitted_at"),
            "started_at": job.get("started_at"),
            "completed_at": job.get("completed_at"),
            "elapsed_ms": job.get("elapsed_ms"),
            "worker_thread_name": job.get("worker_thread_name"),
            # Suggest polling only while the job is still in flight.
            "poll_after_ms": self._DEFAULT_POLL_AFTER_MS if dispatch_state in {"queued", "running"} else None,
            "last_error": job.get("last_error"),
        }
        result_payload = job.get("result_payload")
        if isinstance(result_payload, dict):
            snapshot["result_payload"] = dict(result_payload)
        return snapshot

    @staticmethod
    def _parse_job_timestamp(value: Any) -> datetime | None:
        """Parse an ISO-8601 job timestamp; returns None for blank/invalid input."""
        if not isinstance(value, str) or not value.strip():
            return None
        try:
            return datetime.fromisoformat(value)
        except ValueError:
            return None

    def _run_generation_pipeline_job(
        self,
        version_id: str,
        runner_staff_account_id: int,
        runner_name: str,
        runner_role,
    ) -> dict[str, Any]:
        """Execute one pipeline run inside the worker thread.

        Opens a dedicated admin DB session and an isolated
        ToolManagementService so nothing is shared with request handlers.
        The worker thread name is smuggled out via "_worker_thread_name".
        """
        # Local import breaks the circular dependency with the management service.
        from admin_app.services.tool_management_service import ToolManagementService

        db = AdminSessionLocal()
        try:
            service = ToolManagementService(
                settings=self.settings,
                draft_repository=ToolDraftRepository(db),
                version_repository=ToolVersionRepository(db),
                metadata_repository=ToolMetadataRepository(db),
                artifact_repository=ToolArtifactRepository(db),
                tool_generation_service=ToolGenerationService(self.settings),
            )
            payload = service.run_generation_pipeline(
                version_id,
                runner_staff_account_id=runner_staff_account_id,
                runner_name=runner_name,
                runner_role=runner_role,
            )
            payload = dict(payload)
            payload["_worker_thread_name"] = threading.current_thread().name
            return payload
        finally:
            # Always release the dedicated session, even on failure.
            db.close()

File diff suppressed because it is too large Load Diff

@ -672,6 +672,88 @@ def render_tool_review_page(
</div>
</div>
<div class="card border-0 shadow-sm admin-surface-card mt-4 mb-4">
<div class="card-body p-4">
<div class="d-flex flex-wrap justify-content-between align-items-start gap-3 mb-3">
<div>
<p class="text-uppercase small fw-semibold text-secondary mb-2">Detalhe da versao</p>
<h3 class="h3 fw-semibold mb-2">Revisao humana antes da ativacao</h3>
<p class="text-secondary mb-0">Selecione uma versao da fila para validar o contrato, inspecionar o codigo completo gerado e registrar a decisao da diretoria.</p>
</div>
<span class="badge rounded-pill bg-body-tertiary text-secondary border" data-tool-review-detail-status>Nenhum item</span>
</div>
<div class="row g-4">
<div class="col-12 col-xxl-5">
<div class="d-flex flex-column gap-3">
<div class="admin-tool-review-note p-4" data-tool-review-detail-summary>
<div class="fw-semibold mb-2" data-tool-review-detail-title>Selecione um item da fila</div>
<p class="text-secondary mb-0">O detalhe da versao aparece aqui junto com o resumo funcional e o gate humano atual.</p>
</div>
<div>
<p class="text-uppercase small fw-semibold text-secondary mb-2">Contexto e parametros</p>
<div class="vstack gap-2" data-tool-review-detail-meta>
<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Nenhuma versao selecionada.</div>
</div>
</div>
<div>
<p class="text-uppercase small fw-semibold text-secondary mb-2">Validacoes automaticas</p>
<div class="vstack gap-2" data-tool-review-validation-list>
<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">As validacoes da pipeline aparecem aqui.</div>
</div>
</div>
<div>
<p class="text-uppercase small fw-semibold text-secondary mb-2">Historico da diretoria</p>
<div class="vstack gap-2" data-tool-review-history-list>
<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Nenhuma decisao humana registrada ainda.</div>
</div>
</div>
<div>
<p class="text-uppercase small fw-semibold text-secondary mb-2">Proximos passos</p>
<div class="vstack gap-2" data-tool-review-next-steps>
<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Os proximos passos da versao aparecem aqui.</div>
</div>
</div>
</div>
</div>
<div class="col-12 col-xxl-7">
<div class="d-flex flex-column gap-3 h-100">
<div>
<label class="form-label fw-semibold" for="admin-tool-review-generated-code">Codigo completo da funcao gerada</label>
<textarea class="form-control rounded-4 font-monospace" id="admin-tool-review-generated-code" rows="22" readonly data-tool-review-code>O codigo gerado pela pipeline aparecera aqui assim que uma versao for selecionada.</textarea>
<div class="form-text">Use este campo para revisar a implementacao completa antes de validar, aprovar e ativar a nova tool.</div>
</div>
<div>
<label class="form-label fw-semibold" for="admin-tool-review-decision-notes">Parecer da diretoria</label>
<textarea class="form-control rounded-4" id="admin-tool-review-decision-notes" rows="5" placeholder="Registre o racional da revisao ou da aprovacao humana." data-tool-review-decision-notes></textarea>
<div class="form-text" data-tool-review-decision-hint>As notas da decisao ficam persistidas na trilha administrativa da versao.</div>
</div>
<div class="form-check admin-tool-inline-note rounded-4 p-3">
<input class="form-check-input" type="checkbox" value="1" id="admin-tool-review-code-check" data-tool-review-reviewed-code>
<label class="form-check-label small text-secondary" for="admin-tool-review-code-check">
Confirmo que revisei o codigo completo gerado antes de validar esta versao.
</label>
</div>
<div class="d-flex flex-wrap gap-2" data-tool-review-actions>
<button class="btn btn-outline-dark rounded-pill" type="button" data-tool-review-action="review" disabled>Registrar revisao</button>
<button class="btn btn-dark rounded-pill" type="button" data-tool-review-action="approve" disabled>Aprovar versao</button>
<button class="btn btn-success rounded-pill" type="button" data-tool-review-action="publish" disabled>Publicar no catalogo</button>
<button class="btn btn-outline-danger rounded-pill" type="button" data-tool-review-action="deactivate" disabled>Desativar versao</button>
<button class="btn btn-warning rounded-pill" type="button" data-tool-review-action="rollback" disabled>Executar rollback</button>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="card border-0 shadow-sm admin-surface-card mt-4">
<div class="card-body p-4">
<div class="d-flex flex-wrap justify-content-between align-items-start gap-3 mb-3">

@ -762,7 +762,7 @@ def _build_tool_review_view(request: Request, settings: AdminSettings) -> AdminT
review_notes=(
"Conferir se o gate do item combina com o estado esperado do lifecycle.",
"Observar se a descricao e o objetivo operacional da tool estao claros para o time.",
"Usar o catalogo ativo como comparativo antes de promover uma nova versao.",
"Ler o codigo completo gerado antes de validar manualmente a versao.",
),
approval_notes=(
"Verificar nome, descricao e semantica dos parametros antes da aprovacao.",

@ -115,23 +115,96 @@ function mountToolReviewBoard(board) {
const publicationList = board.querySelector("[data-tool-publication-list]");
const lifecycleList = board.querySelector("[data-tool-contract-lifecycle]");
const parameterTypes = board.querySelector("[data-tool-parameter-types]");
const detailStatus = board.querySelector("[data-tool-review-detail-status]");
const detailSummary = board.querySelector("[data-tool-review-detail-summary]");
const detailTitle = board.querySelector("[data-tool-review-detail-title]");
const detailMeta = board.querySelector("[data-tool-review-detail-meta]");
const validationList = board.querySelector("[data-tool-review-validation-list]");
const historyList = board.querySelector("[data-tool-review-history-list]");
const nextStepsList = board.querySelector("[data-tool-review-next-steps]");
const codeField = board.querySelector("[data-tool-review-code]");
const decisionNotes = board.querySelector("[data-tool-review-decision-notes]");
const decisionHint = board.querySelector("[data-tool-review-decision-hint]");
const reviewedGeneratedCode = board.querySelector("[data-tool-review-reviewed-code]");
const reviewButton = board.querySelector('[data-tool-review-action="review"]');
const approveButton = board.querySelector('[data-tool-review-action="approve"]');
const publishButton = board.querySelector('[data-tool-review-action="publish"]');
const deactivateButton = board.querySelector('[data-tool-review-action="deactivate"]');
const rollbackButton = board.querySelector('[data-tool-review-action="rollback"]');
let selectedVersionId = "";
let lastRenderedHumanGate = null;
let lastRenderedHasSourceCode = false;
if (refreshButton) {
refreshButton.addEventListener("click", () => {
void loadBoard();
void loadBoard(selectedVersionId);
});
}
if (queueList) {
queueList.addEventListener("click", (event) => {
const target = event.target;
if (!(target instanceof HTMLElement)) {
return;
}
const trigger = target.closest("[data-tool-review-select]");
if (!(trigger instanceof HTMLElement)) {
return;
}
const nextVersionId = String(trigger.dataset.versionId || "").trim();
if (!nextVersionId) {
return;
}
void loadReviewDetail(nextVersionId);
});
}
if (publicationList) {
publicationList.addEventListener("click", (event) => {
const target = event.target;
if (!(target instanceof HTMLElement)) {
return;
}
const trigger = target.closest("[data-tool-publication-select]");
if (!(trigger instanceof HTMLElement)) {
return;
}
const nextVersionId = String(trigger.dataset.versionId || "").trim();
if (!nextVersionId) {
return;
}
void loadReviewDetail(nextVersionId);
});
}
[reviewButton, approveButton, publishButton, deactivateButton, rollbackButton].forEach((button) => {
if (!(button instanceof HTMLButtonElement)) {
return;
}
button.dataset.defaultLabel = button.textContent || "";
button.addEventListener("click", () => {
const actionKey = String(button.dataset.toolReviewAction || "").trim();
if (!actionKey) {
return;
}
void submitGovernanceAction(actionKey);
});
});
renderEmptyDetail("Selecione um item da fila para carregar o contexto completo da revisao humana.");
void loadBoard();
async function loadBoard() {
async function loadBoard(preferredVersionId = "") {
toggleRefreshing(true);
clearFeedback();
const overviewResult = await fetchPanelJson(board.dataset.overviewEndpoint);
const contractsResult = await fetchPanelJson(board.dataset.contractsEndpoint);
const reviewQueueResult = await fetchPanelJson(board.dataset.reviewQueueEndpoint);
const publicationsResult = await fetchPanelJson(board.dataset.publicationsEndpoint);
const [overviewResult, contractsResult, reviewQueueResult, publicationsResult] = await Promise.all([
fetchPanelJson(board.dataset.overviewEndpoint),
fetchPanelJson(board.dataset.contractsEndpoint),
fetchPanelJson(board.dataset.reviewQueueEndpoint),
fetchPanelJson(board.dataset.publicationsEndpoint),
]);
if (!overviewResult.ok && !contractsResult.ok && !reviewQueueResult.ok && !publicationsResult.ok) {
showFeedback("warning", overviewResult.message || "Entre com uma sessao administrativa web para carregar esta tela.");
@ -146,9 +219,29 @@ function mountToolReviewBoard(board) {
renderLockedLifecycle(contractsResult.message);
}
if (reviewQueueResult.ok) {
renderReviewQueue(reviewQueueResult.body);
renderReviewQueue(reviewQueueResult.body, preferredVersionId || selectedVersionId);
const items = Array.isArray(reviewQueueResult.body?.items) ? reviewQueueResult.body.items : [];
if (items.length > 0) {
const nextVersionId = items.some((item) => item?.version_id === (preferredVersionId || selectedVersionId))
? (preferredVersionId || selectedVersionId)
: String(items[0]?.version_id || "").trim();
if (nextVersionId) {
await loadReviewDetail(nextVersionId);
} else {
renderEmptyDetail(reviewQueueResult.body?.message || "Nenhuma versao pronta para detalhe.");
}
} else {
const fallbackVersionId = String(preferredVersionId || selectedVersionId || "").trim();
if (fallbackVersionId) {
await loadReviewDetail(fallbackVersionId);
} else {
selectedVersionId = "";
renderEmptyDetail(reviewQueueResult.body?.message || "Nenhuma versao aguardando revisao neste momento.");
}
}
} else {
renderLockedQueue(reviewQueueResult.message);
renderLockedDetail(reviewQueueResult.message || "A sessao atual nao pode acessar o detalhe de revisao.");
}
if (publicationsResult.ok) {
renderPublications(publicationsResult.body);
@ -160,6 +253,99 @@ function mountToolReviewBoard(board) {
toggleRefreshing(false);
}
async function loadReviewDetail(versionId) {
    // Fetches and renders the human-review detail for one version, and
    // remembers the selection so board refreshes keep the same item open.
    const normalizedVersionId = String(versionId || "").trim();
    if (!normalizedVersionId) {
        renderEmptyDetail("Selecione uma versao valida para abrir o detalhe da revisao.");
        return;
    }
    selectedVersionId = normalizedVersionId;
    renderDetailLoading();
    const detailUrl = `${board.dataset.reviewQueueEndpoint}/${encodeURIComponent(normalizedVersionId)}`;
    const detailResult = await fetchPanelJson(detailUrl);
    if (!detailResult.ok) {
        // Locked/failed fetch: show the locked panel plus a warning banner.
        renderLockedDetail(detailResult.message || "Nao foi possivel carregar o detalhe da versao selecionada.");
        showFeedback("warning", detailResult.message || "Nao foi possivel carregar o detalhe da revisao humana.");
        return;
    }
    renderReviewDetail(detailResult.body);
    // Highlight the selected card in the queue list.
    renderReviewQueueSelection(normalizedVersionId);
}
async function submitGovernanceAction(actionKey) {
    // Posts a human governance decision (review / approve / publish /
    // deactivate / rollback) for the currently selected version, then
    // reloads the board pointing at the affected version.
    if (!selectedVersionId) {
        showFeedback("warning", "Selecione uma versao da fila antes de registrar uma decisao humana.");
        return;
    }
    const actionUrl = resolveGovernanceActionUrl(actionKey, selectedVersionId);
    if (!actionUrl) {
        showFeedback("warning", "A acao solicitada nao esta disponivel para esta versao.");
        return;
    }
    // "publish" sends no body; "review" additionally carries the
    // reviewed-code confirmation checkbox.
    let payload;
    if (actionKey === "review") {
        payload = {
            decision_notes: String(decisionNotes?.value || "").trim(),
            reviewed_generated_code: Boolean(reviewedGeneratedCode?.checked),
        };
    } else if (actionKey === "approve" || actionKey === "deactivate" || actionKey === "rollback") {
        payload = {
            decision_notes: String(decisionNotes?.value || "").trim(),
        };
    }
    toggleActionLoading(actionKey, true);
    clearFeedback();
    try {
        const response = await fetch(actionUrl, {
            method: "POST",
            credentials: "same-origin",
            headers: {
                Accept: "application/json",
                // Only declare a JSON body when one is actually sent.
                ...(payload ? { "Content-Type": "application/json" } : {}),
            },
            ...(payload ? { body: JSON.stringify(payload) } : {}),
        });
        const body = await readJson(response);
        if (!response.ok) {
            throw new Error(body?.detail || "Nao foi possivel registrar a decisao humana desta versao.");
        }
        // Clear the decision inputs only after a successful submission.
        if (decisionNotes instanceof HTMLTextAreaElement) {
            decisionNotes.value = "";
        }
        if (reviewedGeneratedCode instanceof HTMLInputElement) {
            reviewedGeneratedCode.checked = false;
        }
        showFeedback("success", body?.message || "Decisao humana registrada com sucesso.");
        await loadBoard(body?.version_id || selectedVersionId);
    } catch (error) {
        showFeedback("danger", error instanceof Error ? error.message : "Erro inesperado ao registrar a decisao humana.");
    } finally {
        toggleActionLoading(actionKey, false);
    }
}
function resolveGovernanceActionUrl(actionKey, versionId) {
    // Maps a governance action key to its REST endpoint for the given
    // version; returns "" for blank version ids or unknown actions.
    const encoded = encodeURIComponent(String(versionId || "").trim());
    if (!encoded) {
        return "";
    }
    switch (actionKey) {
        case "publish":
        case "deactivate":
        case "rollback":
            return `${board.dataset.publicationsEndpoint}/${encoded}/${actionKey}`;
        case "review":
        case "approve":
            return `${board.dataset.reviewQueueEndpoint}/${encoded}/${actionKey}`;
        default:
            return "";
    }
}
function toggleRefreshing(isLoading) {
if (!refreshButton || !refreshLabel || !refreshSpinner) {
return;
@ -169,6 +355,26 @@ function mountToolReviewBoard(board) {
refreshLabel.textContent = isLoading ? "Atualizando..." : "Atualizar leitura";
}
function toggleActionLoading(actionKey, isLoading) {
    // Shows a "Processando..." label on the acting button; when loading ends
    // the panel is re-configured from the last rendered gate state so the
    // enabled/disabled flags are recomputed instead of restored blindly.
    const buttonsByAction = {
        review: reviewButton,
        approve: approveButton,
        publish: publishButton,
        deactivate: deactivateButton,
        rollback: rollbackButton,
    };
    const button = buttonsByAction[actionKey];
    if (!(button instanceof HTMLButtonElement)) {
        return;
    }
    const defaultLabel = button.dataset.defaultLabel || button.textContent || "";
    // Never re-enables here: the enabled state is owned by configureActionPanel.
    button.disabled = isLoading || button.disabled;
    button.textContent = isLoading ? "Processando..." : defaultLabel;
    if (!isLoading) {
        configureActionPanel(lastRenderedHumanGate, lastRenderedHasSourceCode);
    }
}
function clearFeedback() {
feedback.className = "alert d-none rounded-4 mb-4";
feedback.textContent = "";
@ -204,15 +410,46 @@ function mountToolReviewBoard(board) {
parameterTypes.innerHTML = `<span class="badge rounded-pill bg-body-tertiary text-secondary border">Bloqueado</span>`;
}
function renderReviewQueue(payload, preferredVersionId = "") {
    // Render the human-review queue cards, highlighting the selected version.
    // Fix: removed a stale duplicate of the pre-refactor signature and of the
    // old single-line renderer branch (merge residue) that left two `?`
    // branches in a row and made the ternary expression a syntax error.
    const items = Array.isArray(payload?.items) ? payload.items : [];
    setText("[data-tool-review-queue-count]", String(items.length));
    setText("[data-tool-review-queue-mode]", payload?.queue_mode || "Fila web");
    queueList.innerHTML = items.length > 0
        ? items.map((item) => {
            const isSelected = String(item?.version_id || "") === String(preferredVersionId || selectedVersionId || "");
            const validationSummary = item?.automated_validation_summary
                ? `<div class="small text-secondary mt-2"><strong>Pipeline:</strong> ${escapeHtml(item.automated_validation_summary)}</div>`
                : "";
            const queuedAt = item?.queued_at
                ? `<div class="small text-secondary mt-2"><strong>Atualizado:</strong> ${escapeHtml(formatDateTime(item.queued_at))}</div>`
                : "";
            return `<article class="admin-tool-review-card rounded-4 p-4 ${isSelected ? "border border-dark" : ""}"><div class="d-flex justify-content-between align-items-start gap-3 mb-3"><div><div class="small text-uppercase fw-semibold text-secondary mb-2">${escapeHtml(item.gate || "revisao")}</div><h4 class="h5 fw-semibold mb-1">${escapeHtml(item.display_name || item.tool_name || "Tool")}</h4><div class="small text-secondary">${escapeHtml(item.tool_name || "")}</div></div><span class="badge rounded-pill bg-warning-subtle text-warning-emphasis border border-warning-subtle">${escapeHtml(item.status || "pendente")}</span></div><p class="text-secondary mb-3">${escapeHtml(item.summary || payload?.message || "Item aguardando analise do time.")}</p>${validationSummary}${queuedAt}<div class="pt-3"><button class="btn btn-sm ${isSelected ? "btn-dark" : "btn-outline-dark"} rounded-pill" type="button" data-tool-review-select="true" data-version-id="${escapeHtml(item.version_id || "")}">${isSelected ? "Versao selecionada" : "Abrir detalhe"}</button></div></article>`;
        }).join("")
        : `<div class="admin-tool-empty-state rounded-4 p-4"><h4 class="h5 fw-semibold mb-2">Fila sem itens no momento</h4><p class="text-secondary mb-0">${escapeHtml(payload?.message || "Nenhuma tool aguardando revisao agora.")}</p></div>`;
}
function renderReviewQueueSelection(versionId) {
    // Visually mark the currently selected version on queue buttons and cards.
    const targetId = String(versionId || "").trim();
    const selectButtons = queueList.querySelectorAll("[data-tool-review-select]");
    selectButtons.forEach((candidate) => {
        if (!(candidate instanceof HTMLButtonElement)) {
            return;
        }
        const matches = String(candidate.dataset.versionId || "") === targetId;
        candidate.classList.toggle("btn-dark", matches);
        candidate.classList.toggle("btn-outline-dark", !matches);
        candidate.textContent = matches ? "Versao selecionada" : "Abrir detalhe";
    });
    const queueCards = queueList.querySelectorAll(".admin-tool-review-card");
    queueCards.forEach((card) => {
        if (!(card instanceof HTMLElement)) {
            return;
        }
        // A card is selected when its embedded button carries the target id.
        const embeddedButton = card.querySelector("[data-tool-review-select]");
        const matches = embeddedButton instanceof HTMLElement
            && String(embeddedButton.dataset.versionId || "") === targetId;
        card.classList.toggle("border", matches);
        card.classList.toggle("border-dark", matches);
    });
}
function renderLockedQueue(message) {
setText("[data-tool-review-queue-count]", "0");
setText("[data-tool-review-queue-mode]", "Bloqueado");
@ -224,7 +461,18 @@ function mountToolReviewBoard(board) {
setText("[data-tool-review-publication-count]", String(items.length));
setText("[data-tool-publication-source]", payload?.source || "Catalogo web");
publicationList.innerHTML = items.length > 0
? items.slice(0, 9).map((item) => `<div class="col-12 col-md-6 col-xxl-4"><article class="admin-tool-publication-card rounded-4 p-4 h-100"><div class="d-flex justify-content-between align-items-start gap-3 mb-3"><div><div class="small text-uppercase fw-semibold text-secondary mb-2">${escapeHtml(item.domain || "tool")}</div><h4 class="h5 fw-semibold mb-1">${escapeHtml(item.display_name || item.tool_name || "Tool")}</h4><div class="small text-secondary">${escapeHtml(item.tool_name || "")}</div></div><span class="badge rounded-pill bg-success-subtle text-success-emphasis border border-success-subtle">v${escapeHtml(String(item.version || 1))}</span></div><p class="text-secondary mb-3">${escapeHtml(item.description || "Publicacao ativa no catalogo do produto.")}</p><div class="small text-secondary mb-1"><strong>Status:</strong> ${escapeHtml(item.status || "draft")}</div><div class="small text-secondary mb-1"><strong>Parametros:</strong> ${escapeHtml(String(item.parameter_count || 0))}</div><div class="small text-secondary mb-3"><strong>Autor:</strong> ${escapeHtml(item.author_name || item.published_by || "Nao informado")}</div><div class="small text-secondary">${escapeHtml(item.implementation_module || "")}</div></article></div>`).join("")
? items.slice(0, 9).map((item) => {
const manageButton = item?.version_id
? `<div class="pt-3"><button class="btn btn-sm btn-outline-dark rounded-pill" type="button" data-tool-publication-select="true" data-version-id="${escapeHtml(item.version_id || "")}">Abrir detalhe</button></div>`
: "";
const rollbackBadge = item?.rollback_action_available
? `<span class="badge rounded-pill bg-warning-subtle text-warning-emphasis border border-warning-subtle">Rollback disponivel</span>`
: "";
const deactivateBadge = item?.deactivation_action_available
? `<span class="badge rounded-pill bg-body-tertiary text-secondary border">Desativacao disponivel</span>`
: "";
return `<div class="col-12 col-md-6 col-xxl-4"><article class="admin-tool-publication-card rounded-4 p-4 h-100"><div class="d-flex justify-content-between align-items-start gap-3 mb-3"><div><div class="small text-uppercase fw-semibold text-secondary mb-2">${escapeHtml(item.domain || "tool")}</div><h4 class="h5 fw-semibold mb-1">${escapeHtml(item.display_name || item.tool_name || "Tool")}</h4><div class="small text-secondary">${escapeHtml(item.tool_name || "")}</div></div><span class="badge rounded-pill bg-success-subtle text-success-emphasis border border-success-subtle">v${escapeHtml(String(item.version || 1))}</span></div><p class="text-secondary mb-3">${escapeHtml(item.description || "Publicacao ativa no catalogo do produto.")}</p><div class="d-flex flex-wrap gap-2 mb-3">${deactivateBadge}${rollbackBadge}</div><div class="small text-secondary mb-1"><strong>Status:</strong> ${escapeHtml(item.status || "draft")}</div><div class="small text-secondary mb-1"><strong>Parametros:</strong> ${escapeHtml(String(item.parameter_count || 0))}</div><div class="small text-secondary mb-3"><strong>Autor:</strong> ${escapeHtml(item.author_name || item.published_by || "Nao informado")}</div><div class="small text-secondary">${escapeHtml(item.implementation_module || "")}</div>${manageButton}</article></div>`;
}).join("")
: `<div class="col-12"><div class="admin-tool-empty-state rounded-4 p-4"><h4 class="h5 fw-semibold mb-2">Catalogo ativo vazio</h4><p class="text-secondary mb-0">Nenhuma publicacao ativa retornada pela sessao web.</p></div></div>`;
}
@ -233,6 +481,195 @@ function mountToolReviewBoard(board) {
setText("[data-tool-publication-source]", "Bloqueado");
publicationList.innerHTML = `<div class="col-12"><div class="admin-tool-empty-state rounded-4 p-4"><h4 class="h5 fw-semibold mb-2">Catalogo protegido</h4><p class="text-secondary mb-0">${escapeHtml(message || "A sessao atual nao possui permissao para ler as publicacoes ativas.")}</p></div></div>`;
}
function renderDetailLoading() {
    // Put every detail panel into a neutral "loading" state while the
    // version-detail request is in flight.
    detailStatus.textContent = "Carregando";
    detailTitle.textContent = "Sincronizando detalhe da versao";
    detailSummary.innerHTML = `<div class="fw-semibold mb-2">Carregando contexto governado</div><p class="text-secondary mb-0">A leitura do detalhe da versao esta em andamento.</p>`;
    detailMeta.innerHTML = `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Carregando metadados persistidos...</div>`;
    validationList.innerHTML = `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Carregando validacoes automaticas...</div>`;
    historyList.innerHTML = `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Carregando historico humano...</div>`;
    nextStepsList.innerHTML = `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Carregando proximos passos...</div>`;
    if (codeField instanceof HTMLTextAreaElement) {
        codeField.value = "Carregando codigo gerado...";
    }
    // Disable all governance actions until fresh gate data arrives.
    configureActionPanel(null, false);
}
function renderEmptyDetail(message) {
    // Reset every detail panel to its "nothing selected" placeholder state
    // and clear any pending decision input.
    detailStatus.textContent = "Nenhum item";
    detailTitle.textContent = "Selecione um item da fila";
    detailSummary.innerHTML = `<div class="fw-semibold mb-2">Revisao humana aguardando selecao</div><p class="text-secondary mb-0">${escapeHtml(message || "Escolha uma versao da fila para abrir o detalhe governado.")}</p>`;
    detailMeta.innerHTML = `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Os metadados persistidos e os parametros da versao aparecem aqui.</div>`;
    validationList.innerHTML = `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">As validacoes automaticas da pipeline aparecem aqui.</div>`;
    historyList.innerHTML = `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">As decisoes humanas de revisao, aprovacao e publicacao aparecem aqui.</div>`;
    nextStepsList.innerHTML = `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Selecione uma versao para visualizar os proximos passos recomendados.</div>`;
    if (codeField instanceof HTMLTextAreaElement) {
        codeField.value = "O codigo completo da funcao gerada aparecera aqui assim que uma versao for selecionada.";
    }
    // Discard any half-written decision so it cannot leak to another version.
    if (decisionNotes instanceof HTMLTextAreaElement) {
        decisionNotes.value = "";
    }
    if (reviewedGeneratedCode instanceof HTMLInputElement) {
        reviewedGeneratedCode.checked = false;
    }
    // No selection means no available governance actions.
    configureActionPanel(null, false);
}
function renderLockedDetail(message) {
    // Show a permission-denied state for every detail panel when the current
    // session is not allowed to read this version's review detail.
    detailStatus.textContent = "Bloqueado";
    detailTitle.textContent = "Detalhe indisponivel";
    detailSummary.innerHTML = `<div class="fw-semibold mb-2">Leitura protegida</div><p class="text-secondary mb-0">${escapeHtml(message || "A sessao atual nao pode visualizar o detalhe de revisao desta versao.")}</p>`;
    detailMeta.innerHTML = `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Sem acesso aos metadados desta versao.</div>`;
    validationList.innerHTML = `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Sem acesso ao relatorio de validacao automatica.</div>`;
    historyList.innerHTML = `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Sem acesso ao historico de governanca.</div>`;
    nextStepsList.innerHTML = `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Entre com uma sessao com permissao de revisao para continuar.</div>`;
    if (codeField instanceof HTMLTextAreaElement) {
        codeField.value = message || "A leitura do codigo gerado esta protegida pela permissao de revisao.";
    }
    // Clear decision inputs so nothing persists across a locked session.
    if (decisionNotes instanceof HTMLTextAreaElement) {
        decisionNotes.value = "";
    }
    if (reviewedGeneratedCode instanceof HTMLInputElement) {
        reviewedGeneratedCode.checked = false;
    }
    // All governance actions stay disabled while access is denied.
    configureActionPanel(null, false);
}
function renderReviewDetail(payload) {
    // Render the full governed detail of one tool version: summary, persisted
    // metadata, automated validation results, human decision history, next
    // steps, the generated source code, and finally the action panel state.
    const parameters = Array.isArray(payload?.parameters) ? payload.parameters : [];
    const validations = Array.isArray(payload?.automated_validations) ? payload.automated_validations : [];
    const history = Array.isArray(payload?.decision_history) ? payload.decision_history : [];
    const nextSteps = Array.isArray(payload?.next_steps) ? payload.next_steps : [];
    const humanGate = payload?.human_gate || null;
    // A blank/whitespace-only source code payload counts as "no code yet".
    const hasSourceCode = Boolean(String(payload?.generated_source_code || "").trim());
    detailStatus.textContent = payload?.status || "versao";
    detailTitle.innerHTML = `${escapeHtml(payload?.display_name || payload?.tool_name || "Tool")}&nbsp;<span class="small text-secondary">v${escapeHtml(String(payload?.version_number || 1))}</span>`;
    detailSummary.innerHTML = `<div class="fw-semibold mb-2">${escapeHtml(payload?.summary || "Resumo indisponivel")}</div><p class="text-secondary mb-0">${escapeHtml(payload?.description || "Sem descricao detalhada para esta versao.")}</p>`;
    // Declared input parameters (name, description, type, required flag).
    const parameterMarkup = parameters.length > 0
        ? parameters.map((item) => `<div class="admin-tool-inline-note rounded-4 p-3"><div class="d-flex justify-content-between gap-3"><div><div class="fw-semibold">${escapeHtml(item.name || "parametro")}</div><div class="small text-secondary mt-1">${escapeHtml(item.description || "")}</div></div><span class="badge rounded-pill bg-body-tertiary text-secondary border">${escapeHtml(item.parameter_type || "string")}${item.required ? " *" : ""}</span></div></div>`).join("")
        : `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Esta versao nao declarou parametros de entrada.</div>`;
    // Persisted metadata block, then the parameters, then the business goal.
    detailMeta.innerHTML = `
    <div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">
    <div><strong>Tool:</strong> ${escapeHtml(payload?.tool_name || "-")}</div>
    <div><strong>Dominio:</strong> ${escapeHtml(payload?.domain || "-")}</div>
    <div><strong>Owner:</strong> ${escapeHtml(payload?.owner_name || "Nao informado")}</div>
    <div><strong>Gate atual:</strong> ${escapeHtml(humanGate?.current_gate || payload?.queue_entry?.gate || "governance_required")}</div>
    <div><strong>Modulo:</strong> ${escapeHtml(payload?.generated_module || "-")}</div>
    <div><strong>Entrypoint:</strong> ${escapeHtml(payload?.generated_callable || "run")}</div>
    <div><strong>Resumo da pipeline:</strong> ${escapeHtml(payload?.automated_validation_summary || "Sem resumo de validacao automatica.")}</div>
    </div>
    ${parameterMarkup}
    <div class="admin-tool-inline-note rounded-4 p-3 small text-secondary"><strong>Objetivo de negocio:</strong> ${escapeHtml(payload?.business_goal || "Nao informado")}</div>
    `;
    // Automated pipeline checks; only "passed" gets the success badge style.
    validationList.innerHTML = validations.length > 0
        ? validations.map((item) => {
            const issues = Array.isArray(item?.blocking_issues) && item.blocking_issues.length > 0
                ? `<div class="small text-secondary mt-2"><strong>Bloqueios:</strong> ${escapeHtml(item.blocking_issues.join("; "))}</div>`
                : `<div class="small text-secondary mt-2">Sem bloqueios nesta checagem.</div>`;
            return `<div class="admin-tool-inline-note rounded-4 p-3"><div class="d-flex justify-content-between align-items-start gap-3"><div><div class="fw-semibold">${escapeHtml(item.label || item.key || "Validacao")}</div><div class="small text-secondary mt-1">${escapeHtml(item.summary || "")}</div>${issues}</div><span class="badge rounded-pill ${String(item.status || "").toLowerCase() === "passed" ? "bg-success-subtle text-success-emphasis border border-success-subtle" : "bg-danger-subtle text-danger-emphasis border border-danger-subtle"}">${escapeHtml(item.status || "pendente")}</span></div></div>`;
        }).join("")
        : `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Nenhuma validacao automatica registrada para esta versao.</div>`;
    // Human decision trail: status transitions, notes, reviewer, timestamps.
    historyList.innerHTML = history.length > 0
        ? history.map((item) => {
            const statusTransition = item?.previous_status || item?.current_status
                ? `<div class="small text-secondary mt-2"><strong>Status:</strong> ${escapeHtml(item.previous_status || "-")} -> ${escapeHtml(item.current_status || "-")}</div>`
                : "";
            const decisionNotesMarkup = item?.decision_notes
                ? `<div class="small text-secondary mt-2"><strong>Parecer:</strong> ${escapeHtml(item.decision_notes)}</div>`
                : "";
            const reviewedMarkup = item?.reviewed_generated_code === true
                ? `<div class="small text-secondary mt-2"><strong>Codigo revisado:</strong> confirmado</div>`
                : "";
            // NOTE(review): the " ? " separators below look like a mangled
            // separator glyph (possibly "·") — confirm the intended output.
            const actorMarkup = item?.actor_name
                ? `<div class="small text-secondary mt-1">${escapeHtml(item.actor_name)}${item?.actor_role ? ` ? ${escapeHtml(item.actor_role)}` : ""}${item?.recorded_at ? ` ? ${escapeHtml(formatDateTime(item.recorded_at))}` : ""}</div>`
                : "";
            return `<div class="admin-tool-inline-note rounded-4 p-3"><div class="fw-semibold">${escapeHtml(item.label || "Governanca registrada")}</div><div class="small text-secondary mt-1">${escapeHtml(item.summary || "")}</div>${actorMarkup}${statusTransition}${decisionNotesMarkup}${reviewedMarkup}</div>`;
        }).join("")
        : `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Nenhuma decisao humana registrada ainda para esta versao.</div>`;
    nextStepsList.innerHTML = nextSteps.length > 0
        ? nextSteps.map((item) => `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">${escapeHtml(item)}</div>`).join("")
        : `<div class="admin-tool-inline-note rounded-4 p-3 small text-secondary">Nenhum proximo passo retornado para esta versao.</div>`;
    if (codeField instanceof HTMLTextAreaElement) {
        codeField.value = hasSourceCode
            ? String(payload.generated_source_code)
            : "A pipeline ainda nao registrou o codigo completo gerado para esta versao.";
    }
    // Reset decision inputs for the newly rendered version.
    if (decisionNotes instanceof HTMLTextAreaElement) {
        decisionNotes.value = "";
    }
    if (reviewedGeneratedCode instanceof HTMLInputElement) {
        reviewedGeneratedCode.checked = false;
    }
    if (decisionHint instanceof HTMLElement) {
        decisionHint.textContent = buildDecisionHint(humanGate, hasSourceCode);
    }
    // Enable/disable the action buttons based on the current human gate.
    configureActionPanel(humanGate, hasSourceCode);
}
function configureActionPanel(humanGate, hasSourceCode) {
    // Remember the last rendered state so toggleActionLoading can restore it.
    lastRenderedHumanGate = humanGate;
    lastRenderedHasSourceCode = hasSourceCode;
    // Review additionally requires that the generated code is readable.
    const availability = [
        [reviewButton, Boolean(humanGate?.review_action_available) && hasSourceCode],
        [approveButton, Boolean(humanGate?.approval_action_available)],
        [publishButton, Boolean(humanGate?.publication_action_available)],
        [deactivateButton, Boolean(humanGate?.deactivation_action_available)],
        [rollbackButton, Boolean(humanGate?.rollback_action_available)],
    ];
    availability.forEach(([actionButton, enabled]) => {
        configureActionButton(actionButton, enabled);
    });
    const notesEnabled = Boolean(humanGate?.requires_decision_notes);
    if (decisionNotes instanceof HTMLTextAreaElement) {
        decisionNotes.disabled = !notesEnabled;
        if (!notesEnabled) {
            decisionNotes.value = "";
        }
    }
    if (reviewedGeneratedCode instanceof HTMLInputElement) {
        // The confirmation checkbox is interactive only when the gate asks
        // for an explicit code-review confirmation.
        reviewedGeneratedCode.disabled = !humanGate?.requires_code_review_confirmation;
        if (reviewedGeneratedCode.disabled) {
            reviewedGeneratedCode.checked = false;
        }
    }
}
function configureActionButton(button, isEnabled) {
    // Restore the button's resting label and apply its gate availability.
    if (!(button instanceof HTMLButtonElement)) {
        return;
    }
    const restingLabel = button.dataset.defaultLabel || button.textContent || "";
    button.textContent = restingLabel;
    button.disabled = !isEnabled;
}
function buildDecisionHint(humanGate, hasSourceCode) {
    // Explain, following the governance workflow order, which human decision
    // is expected next for the rendered version.
    const fallbackHint = "As notas da decisao ficam persistidas na trilha administrativa da versao.";
    if (!humanGate) {
        return fallbackHint;
    }
    if (humanGate.review_action_available) {
        return hasSourceCode
            ? "Para validar a versao, registre o parecer e confirme explicitamente que o codigo completo foi revisado."
            : "A revisao humana fica habilitada assim que o codigo completo gerado estiver disponivel para leitura.";
    }
    if (humanGate.approval_action_available) {
        return "A aprovacao formal ainda exige um parecer explicito da diretoria antes da publicacao.";
    }
    if (humanGate.publication_action_available) {
        return "A revisao e a aprovacao humanas ja ficaram registradas. Agora a diretoria pode publicar a versao no catalogo.";
    }
    if (humanGate.deactivation_action_available) {
        return humanGate.rollback_action_available
            ? `A versao esta ativa. Registre um parecer para desativar a publicacao ou executar rollback para v${escapeHtml(String(humanGate.rollback_target_version_number || "?"))}.`
            : "A versao esta ativa. Registre um parecer para desativar a publicacao ativa com trilha auditavel.";
    }
    return fallbackHint;
}
}
function mountToolIntakePage(page) {

@ -10,6 +10,16 @@ class Settings(BaseSettings):
google_project_id: str
google_location: str = "us-central1"
# Runtime de atendimento do product. Mantido separado do runtime de geração
# de código do admin_app, que usa AdminSettings próprios.
atendimento_model_name: str | None = None
atendimento_bundle_model_name: str | None = None
atendimento_temperature: float = 0
atendimento_max_output_tokens: int = 768
# Aliases legados mantidos por compatibilidade enquanto o runtime de
# atendimento migra para o perfil explícito de atendimento.
vertex_model_name: str = "gemini-2.5-pro"
vertex_bundle_model_name: str = "gemini-2.5-pro"
@ -31,10 +41,10 @@ class Settings(BaseSettings):
mock_seed_enabled: bool = True
auto_seed_tools: bool = True
auto_seed_mock: bool = True
environment: str = "production"
debug: bool = False
# Cloud SQL (legacy Postgres var kept only for backward compatibility in deploy scripts)
cloud_sql_connection_name: str | None = None
@ -78,10 +88,60 @@ class Settings(BaseSettings):
@field_validator("environment", "conversation_state_backend", mode="before")
@classmethod
def normalize_runtime_text_settings(cls, value):
    """Lower-case and trim free-text runtime settings before validation.

    Fix: removed a stale duplicate ``def normalize_text_settings`` header left
    over from the rename; two consecutive ``def`` lines under one decorator
    stack are a syntax error.
    """
    if isinstance(value, str):
        return value.strip().lower()
    return value
@field_validator("atendimento_model_name", "atendimento_bundle_model_name", mode="before")
@classmethod
def normalize_optional_model_names(cls, value):
    """Trim optional model-name overrides; blank strings collapse to None."""
    if not isinstance(value, str):
        return value
    trimmed = value.strip()
    return trimmed if trimmed else None
@field_validator("vertex_model_name", "vertex_bundle_model_name", mode="before")
@classmethod
def normalize_required_model_names(cls, value):
    """Trim whitespace from the legacy (required) model-name settings."""
    return value.strip() if isinstance(value, str) else value
@field_validator("atendimento_temperature")
@classmethod
def validate_atendimento_temperature(cls, value: float) -> float:
    """Reject sampling temperatures outside the supported [0, 2] range."""
    if not (0 <= value <= 2):
        raise ValueError("atendimento_temperature must be between 0 and 2")
    return value
@field_validator("atendimento_max_output_tokens")
@classmethod
def validate_atendimento_max_output_tokens(cls, value: int) -> int:
    """Guarantee a usable minimum output-token budget for atendimento."""
    if value < 128:
        raise ValueError("atendimento_max_output_tokens must be >= 128")
    return value
def resolve_atendimento_model_name(self) -> str:
    """Return the atendimento model, falling back to the legacy vertex alias.

    Returns "" when neither setting holds a non-blank value.
    """
    for candidate in (self.atendimento_model_name, self.vertex_model_name):
        resolved = str(candidate or "").strip()
        if resolved:
            return resolved
    return ""
def resolve_atendimento_bundle_model_name(self) -> str:
configured = str(self.atendimento_bundle_model_name or "").strip()
if configured:
return configured
legacy = str(self.vertex_bundle_model_name or "").strip()
if legacy:
return legacy
return self.resolve_atendimento_model_name()
def build_atendimento_generation_config(self) -> dict[str, int | float]:
return {
"temperature": float(self.atendimento_temperature),
"max_output_tokens": int(self.atendimento_max_output_tokens),
}
# Module-level singleton. Fix: removed a duplicated instantiation (merge
# residue) so environment parsing/validation runs only once at import time.
settings = Settings()

@ -1,8 +1,10 @@
"""
Rotina dedicada de bootstrap de banco de dados.

Cria tabelas e executa seed inicial de forma explicita, fora do startup do app.
"""
import json
from datetime import UTC, datetime
from pathlib import Path
from sqlalchemy import inspect, text
@ -25,24 +27,40 @@ from app.db.mock_models import (
)
from app.db.mock_seed import seed_mock_data
from app.db.tool_seed import seed_tools
from shared.contracts import GENERATED_TOOLS_PACKAGE
from shared.contracts import (
ToolRuntimePublicationManifest,
get_generated_tool_publication_manifest_path,
get_generated_tools_runtime_dir,
)
_PROJECT_ROOT = Path(__file__).resolve().parents[2]
def _ensure_generated_tools_runtime_package() -> Path:
    """Ensure the isolated generated-tools runtime package and manifest exist.

    Creates the package directory, an ``__init__.py`` marker, and an empty
    publication manifest when they are missing; returns the package directory.

    Fixes: removed stale duplicated lines left by a merge (the old
    ``_PROJECT_ROOT / GENERATED_TOOLS_PACKAGE`` assignment and the old
    ``write_text`` argument), and corrected the over-escaped ``__init__.py``
    content — ``\\"\\"\\"...\\\\n`` wrote literal backslash characters instead of a
    docstring line terminated by a real newline.
    """
    package_dir = get_generated_tools_runtime_dir(_PROJECT_ROOT)
    package_dir.mkdir(parents=True, exist_ok=True)
    init_file = package_dir / "__init__.py"
    if not init_file.exists():
        init_file.write_text(
            '"""Isolated runtime package for admin-governed generated tools."""\n',
            encoding="utf-8",
        )
    manifest_path = get_generated_tool_publication_manifest_path(_PROJECT_ROOT)
    if not manifest_path.exists():
        # Seed an empty manifest so the product runtime has a valid snapshot
        # to read even before any tool is published.
        manifest = ToolRuntimePublicationManifest(
            emitted_at=datetime.now(UTC),
            publications=(),
        )
        manifest_path.write_text(
            json.dumps(manifest.model_dump(mode="json"), ensure_ascii=True, indent=2, sort_keys=True),
            encoding="utf-8",
        )
    return package_dir
def _ensure_mock_schema_evolution() -> None:
inspector = inspect(mock_engine)
table_names = set(inspector.get_table_names())

@ -37,11 +37,11 @@ class LLMService:
)
LLMService._vertex_initialized = True
configured = settings.vertex_model_name.strip()
configured = settings.resolve_atendimento_model_name()
fallback_models = ["gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.0-flash-001"]
self.model_names = self._build_model_sequence(configured, *fallback_models)
self.bundle_model_names = self._build_model_sequence(
settings.vertex_bundle_model_name.strip(),
settings.resolve_atendimento_bundle_model_name(),
*self.model_names,
)
@ -304,7 +304,7 @@ class LLMService:
)
if last_error:
raise RuntimeError(
f"Nenhum modelo Vertex disponivel. Verifique VERTEX_MODEL_NAME e acesso no projeto. Erro: {last_error}"
"Nenhum modelo Vertex disponivel. Verifique ATENDIMENTO_MODEL_NAME/VERTEX_MODEL_NAME e o acesso no projeto. " f"Erro: {last_error}"
) from last_error
raise RuntimeError("Falha ao gerar resposta no Vertex AI.")

@ -1,6 +1,7 @@
import logging
import json
from app.core.settings import settings
from app.services.ai.llm_service import LLMService
from app.services.orchestration.entity_normalizer import EntityNormalizer
from app.services.orchestration.turn_decision import TurnDecision
@ -123,8 +124,7 @@ class MessagePlanner:
preferred_models = getattr(self.llm, "bundle_model_names", None)
bundle_generation_config = {
"candidate_count": 1,
"temperature": 0,
"max_output_tokens": 768,
**settings.build_atendimento_generation_config(),
}
for attempt in range(2):

@ -1,8 +1,10 @@
import importlib
import inspect
import json
import logging
from typing import Callable, Dict, List
from fastapi import HTTPException
from shared.contracts import GENERATED_TOOL_ENTRYPOINT, GENERATED_TOOLS_PACKAGE
from sqlalchemy.orm import Session
from app.models.tool_model import ToolDefinition
@ -23,7 +25,15 @@ from app.services.tools.handlers import (
realizar_pedido,
validar_cliente_venda,
)
from shared.contracts import (
GENERATED_TOOL_ENTRYPOINT,
GENERATED_TOOLS_PACKAGE,
ToolParameterType,
ToolRuntimePublicationManifest,
get_generated_tool_publication_manifest_path,
)
logger = logging.getLogger(__name__)
HANDLERS: Dict[str, Callable] = {
"consultar_estoque": consultar_estoque,
@ -42,16 +52,24 @@ HANDLERS: Dict[str, Callable] = {
"registrar_pagamento_aluguel": registrar_pagamento_aluguel,
}
# Maps shared-contract parameter types to the JSON-schema "type" strings used
# when building parameter schemas for generated tools.
_PARAMETER_SCHEMA_TYPE_MAPPING = {
    ToolParameterType.STRING: "string",
    ToolParameterType.INTEGER: "integer",
    ToolParameterType.NUMBER: "number",
    ToolParameterType.BOOLEAN: "boolean",
    ToolParameterType.OBJECT: "object",
    ToolParameterType.ARRAY: "array",
}
class GeneratedToolCoreBoundaryViolation(RuntimeError):
    """Signals that a generated tool tried to reuse or target core runtime code."""
# Registry em memoria das tools disponiveis para o orquestrador.
class ToolRegistry:
"""Registry em memoria das tools disponiveis para o orquestrador."""
def __init__(self, db: Session, extra_handlers: Dict[str, Callable] | None = None):
"""Carrega tools do banco e registra apenas as que possuem handler conhecido."""
self._tools = []
available_handlers = dict(HANDLERS)
if extra_handlers:
@ -68,6 +86,7 @@ class ToolRegistry:
parameters=db_tool.parameters,
handler=handler,
)
self._load_generated_tool_publications_from_snapshot()
def register_tool(self, name, description, parameters, handler):
"""Registra uma tool em memoria para uso pelo orquestrador."""
@ -90,6 +109,65 @@ class ToolRegistry:
handler=handler,
)
def _load_generated_tool_publications_from_snapshot(self) -> None:
    """Best-effort registration of published tools from the local manifest.

    Failures are logged and skipped so the core registry keeps working even
    when the snapshot file or an individual generated tool is broken.
    """
    snapshot_path = get_generated_tool_publication_manifest_path()
    if not snapshot_path.exists():
        return
    try:
        # utf-8-sig tolerates a BOM that some editors prepend to the file.
        raw_payload = json.loads(snapshot_path.read_text(encoding="utf-8-sig"))
        manifest = ToolRuntimePublicationManifest.model_validate(raw_payload)
    except Exception as exc:
        logger.warning(
            "Falha ao carregar snapshot local de tools publicadas em %s: %s",
            snapshot_path,
            exc,
        )
        return
    for publication in manifest.publications:
        tool_contract = publication.published_tool
        try:
            importlib.invalidate_caches()
            tool_module = importlib.import_module(tool_contract.implementation_module)
            tool_handler = getattr(tool_module, tool_contract.implementation_callable)
            self.register_generated_tool(
                name=tool_contract.tool_name,
                description=tool_contract.description,
                parameters=self._build_generated_parameter_schema(tool_contract.parameters),
                handler=tool_handler,
            )
        except Exception as exc:
            logger.warning(
                "Falha ao registrar tool publicada '%s' a partir do snapshot local %s: %s",
                tool_contract.tool_name,
                snapshot_path,
                exc,
            )
@staticmethod
def _build_generated_parameter_schema(parameters) -> dict:
    """Translate published parameter contracts into a JSON-schema object."""
    schema_properties: dict[str, dict] = {}
    required_names: list[str] = []
    for contract in parameters or ():
        declared_type = contract.parameter_type
        property_schema = {
            "type": _PARAMETER_SCHEMA_TYPE_MAPPING[declared_type],
            "description": contract.description,
        }
        # Container types carry extra shape hints for the orchestrator.
        if declared_type == ToolParameterType.OBJECT:
            property_schema["additionalProperties"] = True
        elif declared_type == ToolParameterType.ARRAY:
            property_schema["items"] = {"type": "string"}
        schema_properties[contract.name] = property_schema
        if contract.required:
            required_names.append(contract.name)
    return {
        "type": "object",
        "properties": schema_properties,
        "required": required_names,
    }
def _append_tool_definition(self, *, name, description, parameters, handler):
self._tools.append(
ToolDefinition(

@ -1,4 +1,4 @@
"""Contratos compartilhados entre product e admin."""
"""Contratos compartilhados entre product e admin."""
from shared.contracts.access_control import (
AdminPermission,
@ -52,6 +52,7 @@ from shared.contracts.system_functional_configuration import (
from shared.contracts.tool_publication import (
GENERATED_TOOL_ENTRYPOINT,
GENERATED_TOOLS_PACKAGE,
GENERATED_TOOL_PUBLICATION_MANIFEST,
PublishedToolContract,
ServiceName,
TOOL_LIFECYCLE_STAGES,
@ -61,8 +62,12 @@ from shared.contracts.tool_publication import (
ToolParameterContract,
ToolParameterType,
ToolPublicationEnvelope,
ToolRuntimePublicationManifest,
build_generated_tool_file_path,
build_generated_tool_module_name,
build_generated_tool_module_path,
get_generated_tool_publication_manifest_path,
get_generated_tools_runtime_dir,
get_tool_lifecycle_stage,
)
@ -71,6 +76,7 @@ __all__ = [
"BOT_GOVERNED_SETTINGS",
"GENERATED_TOOL_ENTRYPOINT",
"GENERATED_TOOLS_PACKAGE",
"GENERATED_TOOL_PUBLICATION_MANIFEST",
"MODEL_RUNTIME_PROFILES",
"MODEL_RUNTIME_SEPARATION_RULES",
"PRODUCT_OPERATIONAL_DATASETS",
@ -85,6 +91,7 @@ __all__ = [
"ToolParameterContract",
"ToolParameterType",
"ToolPublicationEnvelope",
"ToolRuntimePublicationManifest",
"BotGovernanceArea",
"BotGovernanceMutability",
"BotGovernedSettingContract",
@ -109,10 +116,13 @@ __all__ = [
"FunctionalConfigurationMutability",
"FunctionalConfigurationPropagation",
"FunctionalConfigurationSource",
"build_generated_tool_file_path",
"build_generated_tool_module_name",
"build_generated_tool_module_path",
"get_bot_governed_setting",
"get_functional_configuration",
"get_generated_tool_publication_manifest_path",
"get_generated_tools_runtime_dir",
"get_model_runtime_contract",
"get_operational_dataset",
"get_tool_lifecycle_stage",

@ -1,7 +1,8 @@
from __future__ import annotations
from __future__ import annotations
from datetime import datetime
from enum import Enum
from pathlib import Path
import re
from pydantic import BaseModel, Field
@ -104,7 +105,9 @@ def get_tool_lifecycle_stage(
# Package name under which generated runtime tool modules are stored.
GENERATED_TOOLS_PACKAGE = "generated_tools"
# Name of the callable each generated tool module is expected to expose.
GENERATED_TOOL_ENTRYPOINT = "run"
# File name of the manifest listing tools published to the product runtime.
GENERATED_TOOL_PUBLICATION_MANIFEST = "published_runtime_tools.json"
# Valid tool names: snake_case, 3-64 chars, starting with a lowercase letter.
_GENERATED_TOOL_NAME_PATTERN = re.compile(r"^[a-z][a-z0-9_]{2,63}$")
# Repository root, resolved two directories above this module's location.
_PROJECT_ROOT = Path(__file__).resolve().parents[2]
def _normalize_generated_tool_name(tool_name: str) -> str:
@ -124,6 +127,26 @@ def build_generated_tool_module_path(tool_name: str) -> str:
return f"{GENERATED_TOOLS_PACKAGE}/{normalized}.py"
def get_generated_tools_runtime_dir(project_root: Path | None = None) -> Path:
    """Return the directory that holds generated runtime tool modules.

    Falls back to the module-level ``_PROJECT_ROOT`` when *project_root*
    is not supplied.
    """
    base_dir = _PROJECT_ROOT if project_root is None else project_root
    return base_dir / GENERATED_TOOLS_PACKAGE
def build_generated_tool_file_path(
    tool_name: str,
    *,
    project_root: Path | None = None,
) -> Path:
    """Return the file path of the generated module for *tool_name*.

    The name is normalized/validated first, so an invalid tool name raises
    before any path is built.
    """
    module_file = f"{_normalize_generated_tool_name(tool_name)}.py"
    runtime_dir = get_generated_tools_runtime_dir(project_root)
    return runtime_dir / module_file
def get_generated_tool_publication_manifest_path(
    project_root: Path | None = None,
) -> Path:
    """Return the path of the publication manifest inside the runtime dir."""
    manifest_dir = get_generated_tools_runtime_dir(project_root)
    return manifest_dir / GENERATED_TOOL_PUBLICATION_MANIFEST
class ToolParameterType(str, Enum):
STRING = "string"
INTEGER = "integer"
@ -160,3 +183,10 @@ class ToolPublicationEnvelope(BaseModel):
publication_id: str
published_tool: PublishedToolContract
emitted_at: datetime
class ToolRuntimePublicationManifest(BaseModel):
"""Manifest of tool publications emitted by the admin service for the product runtime."""
# Defaults encode the admin -> product direction of the publication flow.
source_service: ServiceName = ServiceName.ADMIN
target_service: ServiceName = ServiceName.PRODUCT
# Timestamp of when this manifest snapshot was emitted (no default: required).
emitted_at: datetime
# Immutable collection of publication envelopes; empty tuple when none published.
publications: tuple[ToolPublicationEnvelope, ...] = ()

@ -1,4 +1,4 @@
import unittest
import unittest
from unittest.mock import MagicMock, patch
from admin_app.db import bootstrap as bootstrap_module
@ -50,6 +50,7 @@ class AdminBootstrapRuntimeTests(unittest.TestCase):
[
"ALTER TABLE tool_drafts ADD COLUMN current_version_number INT NOT NULL DEFAULT 1",
"ALTER TABLE tool_drafts ADD COLUMN version_count INT NOT NULL DEFAULT 1",
"ALTER TABLE tool_drafts ADD COLUMN generation_model VARCHAR(120)",
],
)

@ -483,7 +483,10 @@ class AdminPanelToolsWebTests(unittest.TestCase):
},
)
version_id = intake_response.json()["draft_preview"]["version_id"]
response = client.post(f"/admin/panel/tools/review-queue/{version_id}/review")
response = client.post(
f"/admin/panel/tools/review-queue/{version_id}/review",
json={"decision_notes": "Parecer inicial da diretoria para a revisao humana.", "reviewed_generated_code": True},
)
finally:
app.dependency_overrides.clear()
@ -521,6 +524,42 @@ class AdminPanelToolsWebTests(unittest.TestCase):
self.assertEqual(payload["items"][0]["gate"], "generation_pipeline_required")
self.assertEqual(payload["items"][0]["version_number"], 1)
def test_panel_tools_review_detail_returns_generated_source_for_diretor_session(self):
"""Director session sees generated source and validations in the review detail view."""
# Build an authenticated test client impersonating a DIRETOR staff session.
client, app, _, _, _, _ = self._build_client_with_role(StaffRole.DIRETOR)
try:
# Manual intake of a draft with a single required string parameter.
intake_response = client.post(
"/admin/panel/tools/drafts/intake",
json={
"domain": "locacao",
"tool_name": "emitir_resumo_locacao",
"display_name": "Emitir resumo de locacao",
"description": "Resume contratos de locacao com filtros operacionais para o time interno.",
"business_goal": "Dar visibilidade rapida aos contratos e aos principais dados da locacao.",
"parameters": [
{
"name": "contrato_id",
"parameter_type": "string",
"description": "Identificador do contrato consultado.",
"required": True,
}
],
},
)
version_id = intake_response.json()["draft_preview"]["version_id"]
# Run the generation pipeline, then fetch the human-review detail payload.
pipeline_response = client.post(f"/admin/panel/tools/pipeline/{version_id}/run")
response = client.get(f"/admin/panel/tools/review-queue/{version_id}")
finally:
# Always remove the dependency overrides installed by the client builder.
app.dependency_overrides.clear()
self.assertEqual(intake_response.status_code, 200)
self.assertEqual(pipeline_response.status_code, 200)
self.assertEqual(response.status_code, 200)
payload = response.json()
self.assertEqual(payload["tool_name"], "emitir_resumo_locacao")
# Human gate must offer the review action after the pipeline completed.
self.assertTrue(payload["human_gate"]["review_action_available"])
# Generated code is exposed verbatim and defines the async entrypoint.
self.assertIn("async def run", payload["generated_source_code"])
self.assertEqual(len(payload["automated_validations"]), 4)
def test_panel_tools_collaborator_can_run_generation_pipeline_after_manual_intake(self):
client, app, _, _, _, _ = self._build_client_with_role(StaffRole.COLABORADOR)
try:
@ -665,10 +704,19 @@ class AdminPanelToolsWebTests(unittest.TestCase):
)
version_id = intake_response.json()["draft_preview"]["version_id"]
publish_before_approval = client.post(f"/admin/panel/tools/publications/{version_id}/publish")
review_before_pipeline = client.post(f"/admin/panel/tools/review-queue/{version_id}/review")
review_before_pipeline = client.post(
f"/admin/panel/tools/review-queue/{version_id}/review",
json={"decision_notes": "Tentativa de revisao antes da pipeline.", "reviewed_generated_code": True},
)
pipeline_response = client.post(f"/admin/panel/tools/pipeline/{version_id}/run")
review_response = client.post(f"/admin/panel/tools/review-queue/{version_id}/review")
approve_response = client.post(f"/admin/panel/tools/review-queue/{version_id}/approve")
review_response = client.post(
f"/admin/panel/tools/review-queue/{version_id}/review",
json={"decision_notes": "Analisei o codigo completo gerado antes da validacao humana.", "reviewed_generated_code": True},
)
approve_response = client.post(
f"/admin/panel/tools/review-queue/{version_id}/approve",
json={"decision_notes": "Aprovacao formal da diretoria para seguir com a publicacao.", "reviewed_generated_code": True},
)
pre_publications = client.get("/admin/panel/tools/publications")
publish_response = client.post(f"/admin/panel/tools/publications/{version_id}/publish")
final_publications = client.get("/admin/panel/tools/publications")
@ -714,5 +762,109 @@ class AdminPanelToolsWebTests(unittest.TestCase):
self.assertEqual(publication["parameters"][0]["name"], "contrato_id")
def test_panel_tools_director_can_deactivate_active_publication(self):
"""Full intake->publish flow, then director deactivates the active publication."""
client, app, _, _, _, _ = self._build_client_with_role(StaffRole.DIRETOR)
try:
# Intake a parameterless draft to keep the flow focused on lifecycle state.
intake_response = client.post(
"/admin/panel/tools/drafts/intake",
json={
"domain": "locacao",
"tool_name": "emitir_resumo_locacao",
"display_name": "Emitir resumo de locacao",
"description": "Resume contratos de locacao com filtros operacionais para o time interno.",
"business_goal": "Dar visibilidade rapida aos contratos e aos principais dados da locacao.",
"parameters": [],
},
)
version_id = intake_response.json()["draft_preview"]["version_id"]
# Drive the version through pipeline -> review -> approve -> publish.
client.post(f"/admin/panel/tools/pipeline/{version_id}/run")
client.post(
f"/admin/panel/tools/review-queue/{version_id}/review",
json={"decision_notes": "Analisei o codigo completo antes da ativacao.", "reviewed_generated_code": True},
)
# NOTE(review): sibling tests also send "reviewed_generated_code": True on
# approve; here it is omitted — confirm the approve endpoint does not require it.
client.post(
f"/admin/panel/tools/review-queue/{version_id}/approve",
json={"decision_notes": "Aprovacao formal para disponibilizar a ferramenta."},
)
client.post(f"/admin/panel/tools/publications/{version_id}/publish")
# Deactivate the now-active publication with an audit note.
deactivate_response = client.post(
f"/admin/panel/tools/publications/{version_id}/deactivate",
json={"decision_notes": "Desativacao controlada da ferramenta ativa apos teste concluido."},
)
publications_response = client.get("/admin/panel/tools/publications")
finally:
app.dependency_overrides.clear()
self.assertEqual(deactivate_response.status_code, 200)
# Deactivation archives the version and removes its queue entry.
self.assertEqual(deactivate_response.json()["status"], "archived")
self.assertIsNone(deactivate_response.json()["queue_entry"])
self.assertEqual(publications_response.status_code, 200)
# The archived tool must no longer appear in the active publications catalog.
self.assertNotIn("emitir_resumo_locacao", [item["tool_name"] for item in publications_response.json()["publications"]])
def test_panel_tools_director_can_rollback_active_publication(self):
"""Publish two versions of the same tool, then roll back to the first one."""
client, app, _, _, _, _ = self._build_client_with_role(StaffRole.DIRETOR)
try:
# First version: intake -> pipeline -> review -> approve -> publish.
first_intake = client.post(
"/admin/panel/tools/drafts/intake",
json={
"domain": "locacao",
"tool_name": "emitir_resumo_locacao",
"display_name": "Emitir resumo de locacao",
"description": "Resume contratos de locacao com filtros operacionais para o time interno.",
"business_goal": "Dar visibilidade rapida aos contratos e aos principais dados da locacao.",
"parameters": [],
},
)
first_version_id = first_intake.json()["draft_preview"]["version_id"]
client.post(f"/admin/panel/tools/pipeline/{first_version_id}/run")
client.post(
f"/admin/panel/tools/review-queue/{first_version_id}/review",
json={"decision_notes": "Primeira revisao completa do codigo gerado.", "reviewed_generated_code": True},
)
client.post(
f"/admin/panel/tools/review-queue/{first_version_id}/approve",
json={"decision_notes": "Primeira aprovacao formal da diretoria."},
)
client.post(f"/admin/panel/tools/publications/{first_version_id}/publish")
# Second version of the same tool_name supersedes the first when published.
second_intake = client.post(
"/admin/panel/tools/drafts/intake",
json={
"domain": "locacao",
"tool_name": "emitir_resumo_locacao",
"display_name": "Emitir resumo de locacao",
"description": "Resume contratos de locacao com mais contexto operacional para o time interno.",
"business_goal": "Dar visibilidade rapida aos contratos com filtros adicionais.",
"parameters": [],
},
)
second_version_id = second_intake.json()["draft_preview"]["version_id"]
client.post(f"/admin/panel/tools/pipeline/{second_version_id}/run")
client.post(
f"/admin/panel/tools/review-queue/{second_version_id}/review",
json={"decision_notes": "Segunda revisao completa do codigo gerado.", "reviewed_generated_code": True},
)
client.post(
f"/admin/panel/tools/review-queue/{second_version_id}/approve",
json={"decision_notes": "Segunda aprovacao formal da diretoria."},
)
client.post(f"/admin/panel/tools/publications/{second_version_id}/publish")
# Rollback is requested against the currently-active (second) version.
rollback_response = client.post(
f"/admin/panel/tools/publications/{second_version_id}/rollback",
json={"decision_notes": "Rollback controlado para restaurar a versao anterior estavel."},
)
publications_response = client.get("/admin/panel/tools/publications")
finally:
app.dependency_overrides.clear()
self.assertEqual(rollback_response.status_code, 200)
# Rollback restores the first version as the active publication.
self.assertEqual(rollback_response.json()["status"], "active")
self.assertEqual(rollback_response.json()["version_id"], first_version_id)
publication = next(item for item in publications_response.json()["publications"] if item["tool_name"] == "emitir_resumo_locacao")
self.assertEqual(publication["version_id"], first_version_id)
self.assertTrue(publication["deactivation_action_available"])
# Allow running this test module directly as a script.
if __name__ == "__main__":
unittest.main()

@ -1,4 +1,4 @@
import unittest
import unittest
from admin_app.db.models import ToolDraft
from shared.contracts import ToolLifecycleStatus
@ -19,6 +19,7 @@ class ToolDraftModelTests(unittest.TestCase):
self.assertIn("required_parameter_count", ToolDraft.__table__.columns)
self.assertIn("current_version_number", ToolDraft.__table__.columns)
self.assertIn("version_count", ToolDraft.__table__.columns)
self.assertIn("generation_model", ToolDraft.__table__.columns)
self.assertIn("requires_director_approval", ToolDraft.__table__.columns)
self.assertIn("owner_staff_account_id", ToolDraft.__table__.columns)
self.assertIn("owner_display_name", ToolDraft.__table__.columns)

@ -0,0 +1,180 @@
import threading
import unittest
from types import SimpleNamespace
from unittest.mock import Mock, patch
from admin_app.core import AdminSettings
from admin_app.services.tool_generation_worker_service import ToolGenerationWorkerService
from admin_app.services.tool_management_service import ToolManagementService
from shared.contracts import StaffRole, ToolLifecycleStatus
class ToolGenerationWorkerServiceTests(unittest.TestCase):
"""Tests for the dedicated generation worker's sync and async dispatch paths."""
def test_execute_generation_pipeline_uses_dedicated_worker_metadata(self):
"""Synchronous execution runs the job off the main thread and reports worker metadata."""
worker = ToolGenerationWorkerService(AdminSettings())
main_thread_name = threading.current_thread().name
def fake_job(version_id, runner_staff_account_id, runner_name, runner_role):
# The job must run on a worker thread, not the test's main thread.
self.assertNotEqual(threading.current_thread().name, main_thread_name)
self.assertEqual(version_id, 'tool_version::resumo::v1')
self.assertEqual(runner_staff_account_id, 7)
self.assertEqual(runner_name, 'Diretoria')
self.assertEqual(runner_role, StaffRole.DIRETOR)
return {
'service': 'admin_tool_governance',
'version_id': version_id,
'status': 'generated',
'_worker_thread_name': threading.current_thread().name,
}
try:
# Replace the real pipeline job so the worker plumbing is tested in isolation.
with patch.object(worker, '_run_generation_pipeline_job', side_effect=fake_job):
payload = worker.execute_generation_pipeline(
version_id='tool_version::resumo::v1',
runner_staff_account_id=7,
runner_name='Diretoria',
runner_role=StaffRole.DIRETOR,
)
finally:
# Ensure the worker's thread pool is always torn down.
worker.shutdown(wait=True)
execution = payload['execution']
self.assertEqual(execution['mode'], 'dedicated_generation_worker')
self.assertEqual(execution['target'], 'admin_tool_generation_worker')
self.assertEqual(execution['dispatch_state'], 'completed')
self.assertEqual(execution['worker_max_workers'], 1)
self.assertEqual(execution['queued_jobs_before_submit'], 0)
self.assertEqual(execution['worker_pending_jobs'], 0)
self.assertIsNotNone(execution['submitted_at'])
self.assertGreaterEqual(execution['elapsed_ms'], 0)
self.assertTrue(execution['worker_thread_name'].startswith('admin-tool-generation-worker'))
self.assertEqual(payload['version_id'], 'tool_version::resumo::v1')
self.assertEqual(payload['status'], 'generated')
def test_dispatch_generation_pipeline_returns_queued_snapshot_without_waiting_completion(self):
"""Async dispatch returns immediately with a queued/running snapshot while the job blocks."""
worker = ToolGenerationWorkerService(AdminSettings())
# Events coordinate the blocked fake job with the test's assertions.
job_started = threading.Event()
release_job = threading.Event()
def fake_job(version_id, runner_staff_account_id, runner_name, runner_role):
job_started.set()
# Block until the test releases the job (bounded to avoid hangs).
release_job.wait(timeout=2)
return {
'service': 'admin_tool_governance',
'version_id': version_id,
'status': 'generated',
'_worker_thread_name': threading.current_thread().name,
}
try:
with patch.object(worker, '_run_generation_pipeline_job', side_effect=fake_job):
dispatch = worker.dispatch_generation_pipeline(
version_id='tool_version::assinc::v1',
runner_staff_account_id=9,
runner_name='Diretoria',
runner_role=StaffRole.DIRETOR,
)
# The dispatch snapshot must not wait for job completion.
self.assertEqual(dispatch['mode'], 'dedicated_generation_worker_async')
self.assertIn(dispatch['dispatch_state'], {'queued', 'running'})
self.assertEqual(dispatch['target'], 'admin_tool_generation_worker')
self.assertEqual(dispatch['queued_jobs_before_submit'], 0)
self.assertGreaterEqual(dispatch['poll_after_ms'], 1)
job_started.wait(timeout=2)
# While the job is blocked, polling must report running (or completed on a fast box).
running_snapshot = worker.get_generation_pipeline_dispatch('tool_version::assinc::v1')
self.assertIsNotNone(running_snapshot)
self.assertIn(running_snapshot['dispatch_state'], {'running', 'completed'})
release_job.set()
# NOTE(review): this wait returns immediately because the event was just
# set above; it was presumably meant to wait for job completion — confirm.
release_job.wait(timeout=2)
finally:
worker.shutdown(wait=True)
class ToolManagementServiceWorkerFallbackTests(unittest.TestCase):
"""Tests for ToolManagementService's worker dispatch and inline fallback paths."""
def test_run_generation_pipeline_in_worker_falls_back_to_inline_execution_metadata(self):
"""Without a dedicated worker, the service runs the pipeline inline and says so."""
# No worker service injected: the service should execute inline.
service = ToolManagementService(settings=AdminSettings())
with patch.object(
service,
'run_generation_pipeline',
return_value={
'service': 'admin_tool_governance',
'message': 'ok',
'version_id': 'tool_version::inline::v1',
'status': 'generated',
},
) as run_generation_pipeline:
payload = service.run_generation_pipeline_in_worker(
'tool_version::inline::v1',
runner_staff_account_id=4,
runner_name='Colaborador',
runner_role=StaffRole.COLABORADOR,
)
# The inline path must delegate exactly once with the same arguments.
run_generation_pipeline.assert_called_once_with(
'tool_version::inline::v1',
runner_staff_account_id=4,
runner_name='Colaborador',
runner_role=StaffRole.COLABORADOR,
)
# Execution metadata identifies the inline (non-worker) code path.
self.assertEqual(payload['execution']['mode'], 'inline_admin_service')
self.assertEqual(payload['execution']['target'], 'admin_inline_generation_pipeline')
self.assertEqual(payload['execution']['dispatch_state'], 'completed')
self.assertEqual(payload['execution']['queued_jobs_before_submit'], 0)
self.assertIsNone(payload['execution']['worker_max_workers'])
self.assertEqual(payload['version_id'], 'tool_version::inline::v1')
def test_run_generation_pipeline_in_worker_returns_queued_snapshot_when_dedicated_worker_accepts_job(self):
"""With a worker that accepts the job, the service returns a queued snapshot payload."""
# Minimal stand-in for a draft version as returned by the repositories.
version = SimpleNamespace(
id=11,
version_id='tool_version::fila::v1',
tool_name='emitir_resumo_locacao',
version_number=1,
status=ToolLifecycleStatus.DRAFT,
summary='Resumo governado em fila.',
owner_display_name='Diretoria',
updated_at=None,
created_at=None,
)
draft_repository = Mock()
draft_repository.get_by_tool_name.return_value = SimpleNamespace(id=3)
version_repository = Mock()
version_repository.get_by_version_id.return_value = version
version_repository.list_versions.return_value = [version]
metadata_repository = Mock()
metadata_repository.get_by_tool_version_id.return_value = SimpleNamespace(display_name='Emitir resumo locacao')
# Worker mock reports the job as queued without executing anything.
worker = Mock()
worker.dispatch_generation_pipeline.return_value = {
'mode': 'dedicated_generation_worker_async',
'target': 'admin_tool_generation_worker',
'dispatch_state': 'queued',
'worker_max_workers': 1,
'worker_pending_jobs': 1,
'queued_jobs_before_submit': 0,
'submitted_at': '2026-04-02T10:00:00+00:00',
'started_at': None,
'completed_at': None,
'elapsed_ms': None,
'worker_thread_name': None,
'poll_after_ms': 1200,
'last_error': None,
}
service = ToolManagementService(
settings=AdminSettings(),
draft_repository=draft_repository,
version_repository=version_repository,
metadata_repository=metadata_repository,
tool_generation_worker_service=worker,
)
payload = service.run_generation_pipeline_in_worker(
'tool_version::fila::v1',
runner_staff_account_id=1,
runner_name='Diretoria',
runner_role=StaffRole.DIRETOR,
)
# Version stays in DRAFT; queue entry reflects the pending pipeline run.
self.assertEqual(payload['status'], ToolLifecycleStatus.DRAFT)
self.assertEqual(payload['execution']['dispatch_state'], 'queued')
self.assertEqual(payload['queue_entry']['gate'], 'generation_pipeline_queued')
self.assertEqual(payload['queue_entry']['automated_validation_status'], 'pending')
self.assertIn('request foi liberada', payload['message'])

@ -1,3 +1,4 @@
import asyncio
import unittest
from unittest.mock import patch
from datetime import datetime, timezone
@ -64,6 +65,7 @@ class _FakeToolDraftRepository:
version_count: int,
owner_staff_account_id: int,
owner_display_name: str,
generation_model: str | None = None,
requires_director_approval: bool = True,
commit: bool = True,
) -> ToolDraft:
@ -82,6 +84,7 @@ class _FakeToolDraftRepository:
required_parameter_count=required_parameter_count,
current_version_number=current_version_number,
version_count=version_count,
generation_model=generation_model,
requires_director_approval=requires_director_approval,
owner_staff_account_id=owner_staff_account_id,
owner_display_name=owner_display_name,
@ -107,6 +110,7 @@ class _FakeToolDraftRepository:
version_count: int,
owner_staff_account_id: int,
owner_display_name: str,
generation_model: str | None = None,
requires_director_approval: bool = True,
commit: bool = True,
) -> ToolDraft:
@ -120,6 +124,7 @@ class _FakeToolDraftRepository:
draft.required_parameter_count = required_parameter_count
draft.current_version_number = current_version_number
draft.version_count = version_count
draft.generation_model = generation_model
draft.requires_director_approval = requires_director_approval
draft.owner_staff_account_id = owner_staff_account_id
draft.owner_display_name = owner_display_name
@ -180,6 +185,7 @@ class _FakeToolVersionRepository:
required_parameter_count: int,
owner_staff_account_id: int,
owner_display_name: str,
generation_model: str | None = None,
status: ToolLifecycleStatus = ToolLifecycleStatus.DRAFT,
requires_director_approval: bool = True,
commit: bool = True,
@ -197,6 +203,7 @@ class _FakeToolVersionRepository:
business_goal=business_goal,
parameters_json=parameters_json,
required_parameter_count=required_parameter_count,
generation_model=generation_model,
requires_director_approval=requires_director_approval,
owner_staff_account_id=owner_staff_account_id,
owner_display_name=owner_display_name,
@ -384,6 +391,9 @@ class AdminToolManagementServiceTests(unittest.TestCase):
artifact_repository=self.artifact_repository,
)
def _run_async(self, awaitable):
return asyncio.run(awaitable)
def test_create_draft_submission_persists_initial_tool_version_metadata_and_artifacts(self):
payload = self.service.create_draft_submission(
{
@ -392,6 +402,7 @@ class AdminToolManagementServiceTests(unittest.TestCase):
"display_name": "Consultar vendas por periodo",
"description": "Consulta vendas consolidadas por periodo informado no painel.",
"business_goal": "Ajudar o time interno a acompanhar o desempenho comercial com mais agilidade.",
"generation_model": "gemini-2.5-pro",
"parameters": [
{
"name": "periodo_inicio",
@ -422,11 +433,14 @@ class AdminToolManagementServiceTests(unittest.TestCase):
self.assertEqual(payload["draft_preview"]["draft_id"], "draft_fake_1")
self.assertEqual(payload["draft_preview"]["version_id"], "tool_version::consultar_vendas_periodo::v1")
self.assertEqual(payload["draft_preview"]["version_number"], 1)
self.assertEqual(payload["draft_preview"]["generation_model"], "gemini-2.5-pro")
self.assertEqual(payload["draft_preview"]["version_count"], 1)
self.assertEqual(payload["draft_preview"]["status"], ToolLifecycleStatus.DRAFT)
self.assertEqual(payload["draft_preview"]["owner_name"], "Equipe Interna")
self.assertEqual(len(self.draft_repository.drafts), 1)
self.assertEqual(len(self.version_repository.versions), 1)
self.assertEqual(self.draft_repository.drafts[0].generation_model, "gemini-2.5-pro")
self.assertEqual(self.version_repository.versions[0].generation_model, "gemini-2.5-pro")
self.assertEqual(len(self.metadata_repository.metadata_entries), 1)
self.assertEqual(self.metadata_repository.metadata_entries[0].author_display_name, "Equipe Interna")
self.assertEqual(self.metadata_repository.metadata_entries[0].version_number, 1)
@ -944,6 +958,8 @@ class AdminToolManagementServiceTests(unittest.TestCase):
reviewer_staff_account_id=99,
reviewer_name="Diretoria",
reviewer_role=StaffRole.DIRETOR,
decision_notes="Aguardando a geracao controlada da funcao.",
reviewed_generated_code=True,
)
def test_director_must_review_approve_and_publish_before_activation(self):
@ -979,12 +995,15 @@ class AdminToolManagementServiceTests(unittest.TestCase):
reviewer_staff_account_id=99,
reviewer_name="Diretoria",
reviewer_role=StaffRole.DIRETOR,
decision_notes="Analisei o codigo completo gerado e a estrutura esta aderente ao fluxo governado.",
reviewed_generated_code=True,
)
approve_payload = self.service.approve_version(
version_id,
approver_staff_account_id=99,
approver_name="Diretoria",
approver_role=StaffRole.DIRETOR,
decision_notes="Aprovacao formal registrada apos revisao tecnica e leitura integral do codigo.",
)
publish_payload = self.service.publish_version(
version_id,
@ -1033,6 +1052,61 @@ class AdminToolManagementServiceTests(unittest.TestCase):
publisher_role=StaffRole.DIRETOR,
)
def test_build_review_detail_payload_exposes_generated_source_and_human_history(self):
"""Review detail payload carries generated source plus the full decision history."""
# Intake a draft owned by a collaborator account (id 8).
intake_payload = self.service.create_draft_submission(
{
"domain": "revisao",
"tool_name": "consultar_revisao_aberta",
"display_name": "Consultar revisao aberta",
"description": "Consulta revisoes abertas com filtros administrativos para a oficina.",
"business_goal": "Ajudar o time a localizar revisoes abertas com mais contexto operacional.",
"parameters": [
{
"name": "placa",
"parameter_type": "string",
"description": "Placa usada na busca da revisao.",
"required": True,
}
],
},
owner_staff_account_id=8,
owner_name="Operacao de Oficina",
)
version_id = intake_payload["draft_preview"]["version_id"]
# Collaborator runs the pipeline; director then reviews and approves.
self.service.run_generation_pipeline(
version_id,
runner_staff_account_id=8,
runner_name="Operacao de Oficina",
runner_role=StaffRole.COLABORADOR,
)
self.service.review_version(
version_id,
reviewer_staff_account_id=99,
reviewer_name="Diretoria",
reviewer_role=StaffRole.DIRETOR,
decision_notes="Analise completa do codigo gerado antes da validacao humana.",
reviewed_generated_code=True,
)
self.service.approve_version(
version_id,
approver_staff_account_id=99,
approver_name="Diretoria",
approver_role=StaffRole.DIRETOR,
decision_notes="Aprovacao formal da versao apos revisao humana detalhada.",
)
payload = self.service.build_review_detail_payload(version_id)
self.assertEqual(payload["status"], ToolLifecycleStatus.APPROVED)
# Approved versions expose the publication action through the human gate.
self.assertEqual(payload["human_gate"]["publication_action_available"], True)
self.assertIn("async def run", payload["generated_source_code"])
self.assertEqual(len(payload["automated_validations"]), 4)
# Decision history: review first, then approval, in chronological order.
self.assertEqual(len(payload["decision_history"]), 2)
self.assertEqual(payload["decision_history"][0]["action_key"], ToolArtifactKind.DIRECTOR_REVIEW.value)
self.assertTrue(payload["decision_history"][0]["reviewed_generated_code"])
self.assertIn("aprovacao formal", payload["decision_history"][1]["decision_notes"].lower())
def test_publishing_new_version_archives_previous_active_version(self):
first_intake = self.service.create_draft_submission(
{
@ -1048,8 +1122,21 @@ class AdminToolManagementServiceTests(unittest.TestCase):
)
first_version_id = first_intake["draft_preview"]["version_id"]
self.service.run_generation_pipeline(first_version_id, runner_staff_account_id=7, runner_name="Equipe Interna", runner_role=StaffRole.COLABORADOR)
self.service.review_version(first_version_id, reviewer_staff_account_id=99, reviewer_name="Diretoria", reviewer_role=StaffRole.DIRETOR)
self.service.approve_version(first_version_id, approver_staff_account_id=99, approver_name="Diretoria", approver_role=StaffRole.DIRETOR)
self.service.review_version(
first_version_id,
reviewer_staff_account_id=99,
reviewer_name="Diretoria",
reviewer_role=StaffRole.DIRETOR,
decision_notes="Primeira versao revisada com leitura integral do codigo gerado.",
reviewed_generated_code=True,
)
self.service.approve_version(
first_version_id,
approver_staff_account_id=99,
approver_name="Diretoria",
approver_role=StaffRole.DIRETOR,
decision_notes="Primeira versao aprovada para ativacao controlada.",
)
self.service.publish_version(first_version_id, publisher_staff_account_id=99, publisher_name="Diretoria", publisher_role=StaffRole.DIRETOR)
second_intake = self.service.create_draft_submission(
@ -1066,8 +1153,21 @@ class AdminToolManagementServiceTests(unittest.TestCase):
)
second_version_id = second_intake["draft_preview"]["version_id"]
self.service.run_generation_pipeline(second_version_id, runner_staff_account_id=7, runner_name="Equipe Interna", runner_role=StaffRole.COLABORADOR)
self.service.review_version(second_version_id, reviewer_staff_account_id=99, reviewer_name="Diretoria", reviewer_role=StaffRole.DIRETOR)
self.service.approve_version(second_version_id, approver_staff_account_id=99, approver_name="Diretoria", approver_role=StaffRole.DIRETOR)
self.service.review_version(
second_version_id,
reviewer_staff_account_id=99,
reviewer_name="Diretoria",
reviewer_role=StaffRole.DIRETOR,
decision_notes="Nova versao revisada com comparativo do codigo completo gerado.",
reviewed_generated_code=True,
)
self.service.approve_version(
second_version_id,
approver_staff_account_id=99,
approver_name="Diretoria",
approver_role=StaffRole.DIRETOR,
decision_notes="Nova versao aprovada para substituir a publicacao anterior.",
)
self.service.publish_version(second_version_id, publisher_staff_account_id=99, publisher_name="Diretoria", publisher_role=StaffRole.DIRETOR)
versions_by_number = {version.version_number: version for version in self.version_repository.versions}
@ -1078,7 +1178,269 @@ class AdminToolManagementServiceTests(unittest.TestCase):
self.assertEqual(metadata_by_number[2].status, ToolLifecycleStatus.ACTIVE)
def test_deactivating_active_version_archives_publication_and_removes_tool_from_catalog(self):
"""Deactivation archives an active version, disables gates, and hides the tool."""
intake = self.service.create_draft_submission(
{
"domain": "locacao",
"tool_name": "emitir_resumo_locacao",
"display_name": "Emitir resumo de locacao",
"description": "Resume contratos de locacao com filtros operacionais para o time interno.",
"business_goal": "Dar visibilidade rapida aos contratos e aos principais dados da locacao.",
"parameters": [],
},
owner_staff_account_id=7,
owner_name="Equipe Interna",
)
version_id = intake["draft_preview"]["version_id"]
# Full governed flow: pipeline -> review -> approve -> publish (now ACTIVE).
self.service.run_generation_pipeline(version_id, runner_staff_account_id=7, runner_name="Equipe Interna", runner_role=StaffRole.COLABORADOR)
self.service.review_version(
version_id,
reviewer_staff_account_id=99,
reviewer_name="Diretoria",
reviewer_role=StaffRole.DIRETOR,
decision_notes="Analisei a versao ativa antes da desativacao controlada.",
reviewed_generated_code=True,
)
self.service.approve_version(
version_id,
approver_staff_account_id=99,
approver_name="Diretoria",
approver_role=StaffRole.DIRETOR,
decision_notes="Aprovacao formal para ativar e depois validar a desativacao controlada.",
)
self.service.publish_version(version_id, publisher_staff_account_id=99, publisher_name="Diretoria", publisher_role=StaffRole.DIRETOR)
# Deactivate the active publication with an audit note.
payload = self.service.deactivate_version(
version_id,
actor_staff_account_id=99,
actor_name="Diretoria",
actor_role=StaffRole.DIRETOR,
decision_notes="Desativacao controlada da tool apos encerramento temporario do uso.",
)
self.assertEqual(payload["status"], ToolLifecycleStatus.ARCHIVED)
self.assertIsNone(payload["queue_entry"])
detail = self.service.build_review_detail_payload(version_id)
self.assertEqual(detail["status"], ToolLifecycleStatus.ARCHIVED)
# Archived versions must not offer deactivation or rollback actions.
self.assertFalse(detail["human_gate"]["deactivation_action_available"])
self.assertFalse(detail["human_gate"]["rollback_action_available"])
# The deactivation is recorded as the latest decision-history entry.
self.assertEqual(detail["decision_history"][-1]["action_key"], ToolArtifactKind.PUBLICATION_DEACTIVATION.value)
publications = self.service.build_publications_payload()
self.assertNotIn("emitir_resumo_locacao", [item["tool_name"] for item in publications["publications"]])
def test_rollback_restores_latest_archived_version_into_active_catalog(self):
"""Rolling back an active v2 restores archived v1 as the active publication."""
# First version: full governed flow through publication.
first_intake = self.service.create_draft_submission(
{
"domain": "vendas",
"tool_name": "consultar_funil_comercial",
"display_name": "Consultar funil comercial",
"description": "Consulta o funil comercial consolidado para acompanhamento administrativo.",
"business_goal": "Dar visibilidade ao time interno sobre os principais gargalos do funil.",
"parameters": [],
},
owner_staff_account_id=7,
owner_name="Equipe Interna",
)
first_version_id = first_intake["draft_preview"]["version_id"]
self.service.run_generation_pipeline(first_version_id, runner_staff_account_id=7, runner_name="Equipe Interna", runner_role=StaffRole.COLABORADOR)
self.service.review_version(
first_version_id,
reviewer_staff_account_id=99,
reviewer_name="Diretoria",
reviewer_role=StaffRole.DIRETOR,
decision_notes="Primeira versao revisada antes da futura ativacao controlada.",
reviewed_generated_code=True,
)
self.service.approve_version(
first_version_id,
approver_staff_account_id=99,
approver_name="Diretoria",
approver_role=StaffRole.DIRETOR,
decision_notes="Primeira versao aprovada para publicacao inicial.",
)
self.service.publish_version(first_version_id, publisher_staff_account_id=99, publisher_name="Diretoria", publisher_role=StaffRole.DIRETOR)
# Second version of the same tool_name; publishing it archives version 1.
second_intake = self.service.create_draft_submission(
{
"domain": "vendas",
"tool_name": "consultar_funil_comercial",
"display_name": "Consultar funil comercial",
"description": "Consulta o funil comercial consolidado com campos adicionais para acompanhamento administrativo.",
"business_goal": "Dar visibilidade ao time interno sobre gargalos, volume e conversao do funil.",
"parameters": [],
},
owner_staff_account_id=7,
owner_name="Equipe Interna",
)
second_version_id = second_intake["draft_preview"]["version_id"]
self.service.run_generation_pipeline(second_version_id, runner_staff_account_id=7, runner_name="Equipe Interna", runner_role=StaffRole.COLABORADOR)
self.service.review_version(
second_version_id,
reviewer_staff_account_id=99,
reviewer_name="Diretoria",
reviewer_role=StaffRole.DIRETOR,
decision_notes="Nova versao revisada com leitura integral antes da substituicao.",
reviewed_generated_code=True,
)
self.service.approve_version(
second_version_id,
approver_staff_account_id=99,
approver_name="Diretoria",
approver_role=StaffRole.DIRETOR,
decision_notes="Nova versao aprovada para substituir a publicacao anterior.",
)
self.service.publish_version(second_version_id, publisher_staff_account_id=99, publisher_name="Diretoria", publisher_role=StaffRole.DIRETOR)
# While v2 is active, the human gate offers rollback targeting v1.
active_detail = self.service.build_review_detail_payload(second_version_id)
self.assertTrue(active_detail["human_gate"]["deactivation_action_available"])
self.assertTrue(active_detail["human_gate"]["rollback_action_available"])
self.assertEqual(active_detail["human_gate"]["rollback_target_version_number"], 1)
payload = self.service.rollback_version(
second_version_id,
actor_staff_account_id=99,
actor_name="Diretoria",
actor_role=StaffRole.DIRETOR,
decision_notes="Rollback controlado para restaurar a versao anterior mais estavel.",
)
versions_by_number = {version.version_number: version for version in self.version_repository.versions}
metadata_by_number = {metadata.version_number: metadata for metadata in self.metadata_repository.metadata_entries}
# Rollback swaps the lifecycle states: v1 ACTIVE again, v2 ARCHIVED.
self.assertEqual(payload["status"], ToolLifecycleStatus.ACTIVE)
self.assertEqual(payload["version_id"], first_version_id)
self.assertEqual(versions_by_number[1].status, ToolLifecycleStatus.ACTIVE)
self.assertEqual(metadata_by_number[1].status, ToolLifecycleStatus.ACTIVE)
self.assertEqual(versions_by_number[2].status, ToolLifecycleStatus.ARCHIVED)
self.assertEqual(metadata_by_number[2].status, ToolLifecycleStatus.ARCHIVED)
restored_detail = self.service.build_review_detail_payload(first_version_id)
# The rollback is recorded on the restored version's decision history.
self.assertEqual(restored_detail["decision_history"][-1]["action_key"], ToolArtifactKind.PUBLICATION_ROLLBACK.value)
publications = self.service.build_publications_payload()
restored_publication = next(item for item in publications["publications"] if item["tool_name"] == "consultar_funil_comercial")
self.assertEqual(restored_publication["version_id"], first_version_id)
self.assertTrue(restored_publication["deactivation_action_available"])
class AdminToolManagementTransactionalPersistenceTests(unittest.TestCase):
def test_publish_and_deactivate_keep_local_runtime_snapshot_for_product(self):
"""Publishing must write a local runtime snapshot; deactivation must empty it.

Drives the full governed lifecycle (intake -> pipeline -> review -> approve ->
publish -> deactivate) against in-memory fake repositories, with every
snapshot path helper patched into a throwaway sandbox directory.
"""
import json
import shutil
from pathlib import Path
from unittest.mock import patch
# Fake repositories keep the whole lifecycle in memory (no database needed).
draft_repository = _FakeToolDraftRepository()
version_repository = _FakeToolVersionRepository()
metadata_repository = _FakeToolMetadataRepository()
artifact_repository = _FakeToolArtifactRepository()
service = ToolManagementService(
settings=AdminSettings(admin_api_prefix="/admin"),
draft_repository=draft_repository,
version_repository=version_repository,
metadata_repository=metadata_repository,
artifact_repository=artifact_repository,
)
# Manual intake creates the draft version that the pipeline will generate.
intake_payload = service.create_draft_submission(
{
"domain": "locacao",
"tool_name": "emitir_resumo_locacao",
"display_name": "Emitir resumo de locacao",
"description": "Resume uma locacao ativa com dados importantes para o atendimento.",
"business_goal": "Permitir que a equipe gere um resumo operacional de locacao sem acessar o core.",
"parameters": [
{
"name": "reserva_id",
"parameter_type": ToolParameterType.STRING,
"description": "Identificador da reserva que sera resumida.",
"required": True,
}
],
},
owner_staff_account_id=7,
owner_name="Equipe Interna",
owner_role=StaffRole.COLABORADOR,
)
version_id = intake_payload["draft_preview"]["version_id"]
# Generation, human review and director approval are prerequisites for publishing.
service.run_generation_pipeline(
version_id,
runner_staff_account_id=7,
runner_name="Equipe Interna",
runner_role=StaffRole.COLABORADOR,
)
service.review_version(
version_id,
reviewer_staff_account_id=99,
reviewer_name="Diretoria",
reviewer_role=StaffRole.DIRETOR,
decision_notes="Revisao humana registrada para liberacao controlada.",
reviewed_generated_code=True,
)
service.approve_version(
version_id,
approver_staff_account_id=99,
approver_name="Diretoria",
approver_role=StaffRole.DIRETOR,
decision_notes="Aprovacao formal liberada para publicacao governada.",
)
# Sandbox directory that stands in for the product runtime snapshot location.
sandbox_root = Path.cwd() / ".tmp_test_admin_runtime_snapshot"
shutil.rmtree(sandbox_root, ignore_errors=True)
package_dir = sandbox_root / GENERATED_TOOLS_PACKAGE
manifest_path = package_dir / "published_runtime_tools.json"
def build_file_path(tool_name: str):
# Mirrors build_generated_tool_file_path but targets the sandbox package dir.
return package_dir / f"{tool_name}.py"
try:
# Redirect every snapshot path helper into the sandbox before publishing.
with patch(
"admin_app.services.tool_management_service.get_generated_tools_runtime_dir",
return_value=package_dir,
), patch(
"admin_app.services.tool_management_service.get_generated_tool_publication_manifest_path",
return_value=manifest_path,
), patch(
"admin_app.services.tool_management_service.build_generated_tool_file_path",
side_effect=build_file_path,
):
service.publish_version(
version_id,
publisher_staff_account_id=99,
publisher_name="Diretoria",
publisher_role=StaffRole.DIRETOR,
)
# Publishing must emit a manifest entry plus the generated module file.
manifest = json.loads(manifest_path.read_text(encoding="utf-8"))
self.assertEqual(manifest["target_service"], "product")
self.assertEqual(len(manifest["publications"]), 1)
self.assertEqual(
manifest["publications"][0]["published_tool"]["tool_name"],
"emitir_resumo_locacao",
)
self.assertEqual(
manifest["publications"][0]["published_tool"]["status"],
ToolLifecycleStatus.ACTIVE.value,
)
self.assertTrue(build_file_path("emitir_resumo_locacao").exists())
self.assertIn(
"async def run",
build_file_path("emitir_resumo_locacao").read_text(encoding="utf-8"),
)
# Deactivating the only active version must leave an empty publications list.
service.deactivate_version(
version_id,
actor_staff_account_id=99,
actor_name="Diretoria",
actor_role=StaffRole.DIRETOR,
decision_notes="Desativacao controlada para manter apenas o snapshot local anterior.",
)
manifest_after = json.loads(manifest_path.read_text(encoding="utf-8"))
self.assertEqual(manifest_after["publications"], [])
finally:
# Always remove the sandbox, even when an assertion fails.
shutil.rmtree(sandbox_root, ignore_errors=True)
def setUp(self):
self.engine = create_engine("sqlite:///:memory:")
AdminBase.metadata.create_all(bind=self.engine)
@ -1143,3 +1505,46 @@ class AdminToolManagementTransactionalPersistenceTests(unittest.TestCase):
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
unittest.main()
class AdminToolManagementWorkerDispatchTests(unittest.TestCase):
    """Checks the worker-dispatch wrapper around the generation pipeline."""

    def test_run_generation_pipeline_in_worker_falls_back_to_inline_execution_metadata(self):
        """When run via the worker entry point, the inline pipeline is invoked
        once with the same arguments and the payload gains execution metadata."""
        management_service = ToolManagementService(settings=AdminSettings(admin_api_prefix="/admin"))
        version_identifier = "tool_version::teste::v1"
        queue_entry = {
            "entry_id": version_identifier,
            "version_id": version_identifier,
            "version_number": 1,
            "tool_name": "teste",
            "display_name": "Tool teste",
            "status": ToolLifecycleStatus.GENERATED,
            "gate": "validation_required",
            "summary": "Resumo",
        }
        pipeline_payload = {
            "message": "Pipeline executado.",
            "version_id": version_identifier,
            "tool_name": "teste",
            "version_number": 1,
            "status": ToolLifecycleStatus.GENERATED,
            "current_step": "validation",
            "steps": [],
            "queue_entry": queue_entry,
            "automated_validations": [],
            "next_steps": [],
        }
        # Stub the inline pipeline so only the dispatch wrapper is exercised.
        with patch.object(
            management_service, "run_generation_pipeline", return_value=pipeline_payload
        ) as inline_pipeline:
            worker_payload = management_service.run_generation_pipeline_in_worker(
                version_identifier,
                runner_staff_account_id=7,
                runner_name="Equipe Interna",
                runner_role=StaffRole.COLABORADOR,
            )
        inline_pipeline.assert_called_once_with(
            version_identifier,
            runner_staff_account_id=7,
            runner_name="Equipe Interna",
            runner_role=StaffRole.COLABORADOR,
        )
        self.assertEqual(worker_payload["execution"]["mode"], "inline_admin_service")
        self.assertEqual(worker_payload["execution"]["target"], "admin_inline_generation_pipeline")

@ -1,4 +1,4 @@
import unittest
import unittest
from admin_app.db.models import ToolVersion
from shared.contracts import ToolLifecycleStatus
@ -17,6 +17,7 @@ class ToolVersionModelTests(unittest.TestCase):
self.assertIn("business_goal", ToolVersion.__table__.columns)
self.assertIn("parameters_json", ToolVersion.__table__.columns)
self.assertIn("required_parameter_count", ToolVersion.__table__.columns)
self.assertIn("generation_model", ToolVersion.__table__.columns)
self.assertIn("requires_director_approval", ToolVersion.__table__.columns)
self.assertIn("owner_staff_account_id", ToolVersion.__table__.columns)
self.assertIn("owner_display_name", ToolVersion.__table__.columns)

@ -504,6 +504,10 @@ class AdminToolsWebTests(unittest.TestCase):
response = client.post(
f"/admin/tools/review-queue/{version_id}/review",
headers={"Authorization": "Bearer token"},
json={
"decision_notes": "Parecer inicial da diretoria para a revisao humana.",
"reviewed_generated_code": True,
},
)
finally:
app.dependency_overrides.clear()
@ -544,6 +548,49 @@ class AdminToolsWebTests(unittest.TestCase):
self.assertEqual(payload["items"][0]["version_number"], 1)
self.assertIn("approved", payload["supported_statuses"])
def test_tools_review_detail_returns_generated_source_for_diretor(self):
# A director runs manual intake plus the pipeline, then the review-detail
# endpoint must expose the generated source code and validation results.
client, app, _, _, _, _ = self._build_client_with_role(StaffRole.DIRETOR)
try:
intake_response = client.post(
"/admin/tools/drafts/intake",
headers={"Authorization": "Bearer token"},
json={
"domain": "revisao",
"tool_name": "consultar_revisao_aberta",
"display_name": "Consultar revisao aberta",
"description": "Consulta revisoes abertas com filtros administrativos para a oficina.",
"business_goal": "Ajudar o time a localizar revisoes abertas com mais contexto operacional.",
"parameters": [
{
"name": "placa",
"parameter_type": "string",
"description": "Placa usada na busca da revisao.",
"required": True,
}
],
},
)
version_id = intake_response.json()["draft_preview"]["version_id"]
pipeline_response = client.post(
f"/admin/tools/pipeline/{version_id}/run",
headers={"Authorization": "Bearer token"},
)
response = client.get(
f"/admin/tools/review-queue/{version_id}",
headers={"Authorization": "Bearer token"},
)
finally:
# Clear FastAPI dependency overrides so they never leak between tests.
app.dependency_overrides.clear()
self.assertEqual(intake_response.status_code, 200)
self.assertEqual(pipeline_response.status_code, 200)
self.assertEqual(response.status_code, 200)
payload = response.json()
self.assertEqual(payload["tool_name"], "consultar_revisao_aberta")
self.assertTrue(payload["human_gate"]["review_action_available"])
# The detail payload must include the full generated module source.
self.assertIn("async def run", payload["generated_source_code"])
self.assertEqual(len(payload["automated_validations"]), 4)
def test_tools_collaborator_can_run_generation_pipeline_after_manual_intake(self):
client, app, _, _, _, _ = self._build_client_with_role(StaffRole.COLABORADOR)
try:
@ -707,6 +754,10 @@ class AdminToolsWebTests(unittest.TestCase):
review_before_pipeline = client.post(
f"/admin/tools/review-queue/{version_id}/review",
headers={"Authorization": "Bearer token"},
json={
"decision_notes": "Tentativa de revisao antes da pipeline.",
"reviewed_generated_code": True,
},
)
pipeline_response = client.post(
f"/admin/tools/pipeline/{version_id}/run",
@ -715,10 +766,18 @@ class AdminToolsWebTests(unittest.TestCase):
review_response = client.post(
f"/admin/tools/review-queue/{version_id}/review",
headers={"Authorization": "Bearer token"},
json={
"decision_notes": "Analisei o codigo completo gerado antes da validacao humana.",
"reviewed_generated_code": True,
},
)
approve_response = client.post(
f"/admin/tools/review-queue/{version_id}/approve",
headers={"Authorization": "Bearer token"},
json={
"decision_notes": "Aprovacao formal da diretoria para seguir com a publicacao.",
"reviewed_generated_code": True,
},
)
pre_publications = client.get("/admin/tools/publications", headers={"Authorization": "Bearer token"})
publish_response = client.post(
@ -769,5 +828,120 @@ class AdminToolsWebTests(unittest.TestCase):
self.assertEqual(publication["parameters"][0]["parameter_type"], "string")
def test_tools_director_can_deactivate_active_publication(self):
# End to end over HTTP: a director publishes a tool, deactivates it, and
# the publications listing must no longer include it.
client, app, _, _, _, _ = self._build_client_with_role(StaffRole.DIRETOR)
try:
intake_response = client.post(
"/admin/tools/drafts/intake",
headers={"Authorization": "Bearer token"},
json={
"domain": "revisao",
"tool_name": "consultar_revisao_aberta",
"display_name": "Consultar revisao aberta",
"description": "Consulta revisoes abertas com filtros administrativos para a oficina.",
"business_goal": "Ajudar o time a localizar revisoes abertas com mais contexto operacional.",
"parameters": [],
},
)
version_id = intake_response.json()["draft_preview"]["version_id"]
# Walk the governed lifecycle: generate, review, approve, publish.
client.post(f"/admin/tools/pipeline/{version_id}/run", headers={"Authorization": "Bearer token"})
client.post(
f"/admin/tools/review-queue/{version_id}/review",
headers={"Authorization": "Bearer token"},
json={"decision_notes": "Analisei o codigo completo antes da ativacao.", "reviewed_generated_code": True},
)
client.post(
f"/admin/tools/review-queue/{version_id}/approve",
headers={"Authorization": "Bearer token"},
json={"decision_notes": "Aprovacao formal para disponibilizar a ferramenta."},
)
client.post(f"/admin/tools/publications/{version_id}/publish", headers={"Authorization": "Bearer token"})
deactivate_response = client.post(
f"/admin/tools/publications/{version_id}/deactivate",
headers={"Authorization": "Bearer token"},
json={"decision_notes": "Desativacao controlada da ferramenta ativa apos teste concluido."},
)
publications_response = client.get("/admin/tools/publications", headers={"Authorization": "Bearer token"})
finally:
# Clear FastAPI dependency overrides so they never leak between tests.
app.dependency_overrides.clear()
self.assertEqual(deactivate_response.status_code, 200)
self.assertEqual(deactivate_response.json()["status"], "archived")
# A deactivated version no longer belongs to the review queue.
self.assertIsNone(deactivate_response.json()["queue_entry"])
self.assertEqual(publications_response.status_code, 200)
self.assertNotIn("consultar_revisao_aberta", [item["tool_name"] for item in publications_response.json()["publications"]])
def test_tools_director_can_rollback_active_publication(self):
# End to end over HTTP: publish two versions of the same tool, then roll
# the active (second) one back and expect version 1 to become active again.
client, app, _, _, _, _ = self._build_client_with_role(StaffRole.DIRETOR)
try:
# First version: full lifecycle up to publication.
first_intake = client.post(
"/admin/tools/drafts/intake",
headers={"Authorization": "Bearer token"},
json={
"domain": "revisao",
"tool_name": "consultar_revisao_aberta",
"display_name": "Consultar revisao aberta",
"description": "Consulta revisoes abertas com filtros administrativos para a oficina.",
"business_goal": "Ajudar o time a localizar revisoes abertas com mais contexto operacional.",
"parameters": [],
},
)
first_version_id = first_intake.json()["draft_preview"]["version_id"]
client.post(f"/admin/tools/pipeline/{first_version_id}/run", headers={"Authorization": "Bearer token"})
client.post(
f"/admin/tools/review-queue/{first_version_id}/review",
headers={"Authorization": "Bearer token"},
json={"decision_notes": "Primeira revisao completa do codigo gerado.", "reviewed_generated_code": True},
)
client.post(
f"/admin/tools/review-queue/{first_version_id}/approve",
headers={"Authorization": "Bearer token"},
json={"decision_notes": "Primeira aprovacao formal da diretoria."},
)
client.post(f"/admin/tools/publications/{first_version_id}/publish", headers={"Authorization": "Bearer token"})
# Second version of the same tool supersedes the first when published.
second_intake = client.post(
"/admin/tools/drafts/intake",
headers={"Authorization": "Bearer token"},
json={
"domain": "revisao",
"tool_name": "consultar_revisao_aberta",
"display_name": "Consultar revisao aberta",
"description": "Consulta revisoes abertas com mais contexto operacional para a oficina.",
"business_goal": "Ajudar o time a localizar revisoes abertas com filtros extras.",
"parameters": [],
},
)
second_version_id = second_intake.json()["draft_preview"]["version_id"]
client.post(f"/admin/tools/pipeline/{second_version_id}/run", headers={"Authorization": "Bearer token"})
client.post(
f"/admin/tools/review-queue/{second_version_id}/review",
headers={"Authorization": "Bearer token"},
json={"decision_notes": "Segunda revisao completa do codigo gerado.", "reviewed_generated_code": True},
)
client.post(
f"/admin/tools/review-queue/{second_version_id}/approve",
headers={"Authorization": "Bearer token"},
json={"decision_notes": "Segunda aprovacao formal da diretoria."},
)
client.post(f"/admin/tools/publications/{second_version_id}/publish", headers={"Authorization": "Bearer token"})
# Rollback targets the currently active (second) version.
rollback_response = client.post(
f"/admin/tools/publications/{second_version_id}/rollback",
headers={"Authorization": "Bearer token"},
json={"decision_notes": "Rollback controlado para restaurar a versao anterior estavel."},
)
publications_response = client.get("/admin/tools/publications", headers={"Authorization": "Bearer token"})
finally:
# Clear FastAPI dependency overrides so they never leak between tests.
app.dependency_overrides.clear()
self.assertEqual(rollback_response.status_code, 200)
self.assertEqual(rollback_response.json()["status"], "active")
# Rollback must restore the first version as the active publication.
self.assertEqual(rollback_response.json()["version_id"], first_version_id)
publication = next(item for item in publications_response.json()["publications"] if item["tool_name"] == "consultar_revisao_aberta")
self.assertEqual(publication["version_id"], first_version_id)
self.assertTrue(publication["deactivation_action_available"])
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
unittest.main()

@ -158,6 +158,10 @@ class AdminViewBootstrapTests(unittest.TestCase):
self.assertIn('data-contracts-endpoint="/panel/tools/contracts"', response.text)
self.assertIn('data-review-queue-endpoint="/panel/tools/review-queue"', response.text)
self.assertIn('data-publications-endpoint="/panel/tools/publications"', response.text)
self.assertIn('data-tool-review-code', response.text)
self.assertIn('data-tool-review-decision-notes', response.text)
self.assertIn('data-tool-review-action="deactivate"', response.text)
self.assertIn('data-tool-review-action="rollback"', response.text)
self.assertNotIn("Abrir login administrativo", response.text)
def test_collaborator_management_page_redirects_to_login_without_session(self):

@ -1,4 +1,4 @@
import os
import os
import unittest
from datetime import datetime, timedelta
from app.core.time_utils import utc_now
@ -3702,5 +3702,119 @@ class ToolRegistryExecutionTests(unittest.IsolatedAsyncioTestCase):
)
def test_registry_loads_generated_tool_from_local_publication_snapshot(self):
"""A valid local publication snapshot must register and execute its tool.

Builds an importable sandbox `generated_tools` package plus a manifest,
then asserts the registry loads the tool and can run it end to end.
"""
import asyncio
import json
import shutil
import sys
from pathlib import Path
from unittest.mock import patch
# Sandbox that acts as an importable generated_tools package root.
sandbox_root = Path.cwd() / ".tmp_test_registry_snapshot_valid"
shutil.rmtree(sandbox_root, ignore_errors=True)
package_dir = sandbox_root / "generated_tools"
package_dir.mkdir(parents=True, exist_ok=True)
(package_dir / "__init__.py").write_text("", encoding="utf-8")
# Minimal generated module exposing the async `run` callable the manifest names.
(package_dir / "emitir_resumo_locacao.py").write_text(
"async def run(reserva_id: str):\n return {\"reserva_id\": reserva_id, \"status\": \"ok\"}\n",
encoding="utf-8",
)
manifest_path = package_dir / "published_runtime_tools.json"
manifest_path.write_text(
json.dumps(
{
"source_service": "admin",
"target_service": "product",
"emitted_at": "2026-04-02T12:00:00+00:00",
"publications": [
{
"source_service": "admin",
"target_service": "product",
"publication_id": "metadata::emitir_resumo_locacao::v1",
"emitted_at": "2026-04-02T12:00:00+00:00",
"published_tool": {
"tool_name": "emitir_resumo_locacao",
"display_name": "Emitir resumo de locacao",
"description": "Gera um resumo curto da locacao.",
"version": 1,
"status": "active",
"parameters": [
{
"name": "reserva_id",
"parameter_type": "string",
"description": "Identificador da reserva.",
"required": True,
}
],
"implementation_module": "generated_tools.emitir_resumo_locacao",
"implementation_callable": "run",
},
}
],
},
ensure_ascii=True,
),
encoding="utf-8",
)
# __new__ bypasses __init__ so no core tools get seeded; start empty.
registry = ToolRegistry.__new__(ToolRegistry)
registry._tools = []
# Make the sandbox importable and drop any cached generated_tools modules
# so the import resolves against the sandbox package, not a previous run.
sys.path.insert(0, str(sandbox_root))
sys.modules.pop("generated_tools", None)
sys.modules.pop("generated_tools.emitir_resumo_locacao", None)
try:
with patch(
"app.services.tools.tool_registry.get_generated_tool_publication_manifest_path",
return_value=manifest_path,
):
registry._load_generated_tool_publications_from_snapshot()
self.assertEqual([tool.name for tool in registry.get_tools()], ["emitir_resumo_locacao"])
result = asyncio.run(registry.execute("emitir_resumo_locacao", {"reserva_id": "LOC-1"}))
self.assertEqual(result["reserva_id"], "LOC-1")
finally:
# Undo sys.path/sys.modules mutations and remove the sandbox directory.
if str(sandbox_root) in sys.path:
sys.path.remove(str(sandbox_root))
sys.modules.pop("generated_tools", None)
sys.modules.pop("generated_tools.emitir_resumo_locacao", None)
shutil.rmtree(sandbox_root, ignore_errors=True)
def test_registry_ignores_invalid_publication_snapshot_and_keeps_existing_tools(self):
    """A corrupt snapshot manifest is ignored and registered tools survive untouched."""
    import shutil
    import sys
    from pathlib import Path
    from unittest.mock import patch

    async def baseline_handler(**kwargs):
        return kwargs

    workspace = Path.cwd() / ".tmp_test_registry_snapshot_invalid"
    shutil.rmtree(workspace, ignore_errors=True)
    workspace.mkdir(parents=True, exist_ok=True)
    broken_manifest = workspace / "published_runtime_tools.json"
    # Deliberately malformed JSON: the loader must swallow the parse failure.
    broken_manifest.write_text("{invalid json", encoding="utf-8")

    # __new__ bypasses __init__; seed exactly one pre-existing core tool.
    registry = ToolRegistry.__new__(ToolRegistry)
    existing_tool = ToolDefinition(
        name="consultar_estoque",
        description="",
        parameters={},
        handler=baseline_handler,
    )
    registry._tools = [existing_tool]
    sys.modules.pop("generated_tools", None)
    try:
        with patch(
            "app.services.tools.tool_registry.get_generated_tool_publication_manifest_path",
            return_value=broken_manifest,
        ):
            registry._load_generated_tool_publications_from_snapshot()
        self.assertEqual(
            [tool.name for tool in registry.get_tools()],
            ["consultar_estoque"],
        )
    finally:
        sys.modules.pop("generated_tools", None)
        shutil.rmtree(workspace, ignore_errors=True)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
unittest.main()

@ -1,9 +1,11 @@
import os
import unittest
from types import SimpleNamespace
from unittest.mock import patch
os.environ.setdefault("DEBUG", "false")
from app.core.settings import Settings
from app.services.ai.llm_service import (
INVALID_RECEIPT_WATERMARK_MESSAGE,
VALID_RECEIPT_WATERMARK_MARKER,
@ -82,6 +84,57 @@ class LLMServiceResponseParsingTests(unittest.TestCase):
self.assertEqual(payload, {"response": '{"ok": true}', "tool_call": None})
class LLMServiceRuntimeConfigurationTests(unittest.TestCase):
"""Covers runtime model selection when constructing LLMService.

setUp/tearDown snapshot and restore the class-level Vertex caches so every
test builds the service from a clean state without leaking shared globals.
"""
def setUp(self):
# Preserve the shared class-level state, then reset it for this test.
self._vertex_initialized = LLMService._vertex_initialized
self._models = dict(LLMService._models)
self._vertex_tools_cache = dict(LLMService._vertex_tools_cache)
LLMService._vertex_initialized = False
LLMService._models = {}
LLMService._vertex_tools_cache = {}
def tearDown(self):
# Restore the snapshot so other test classes see the original caches.
LLMService._vertex_initialized = self._vertex_initialized
LLMService._models = self._models
LLMService._vertex_tools_cache = self._vertex_tools_cache
def test_constructor_prefers_explicit_atendimento_runtime_models(self):
# When dedicated atendimento model names are configured they win over
# the legacy vertex_* settings.
runtime_settings = Settings(
google_project_id="test-project",
google_location="us-central1",
atendimento_model_name="gemini-atendimento",
atendimento_bundle_model_name="gemini-atendimento-bundle",
vertex_model_name="legacy-runtime",
vertex_bundle_model_name="legacy-bundle",
)
with patch("app.services.ai.llm_service.settings", runtime_settings), patch(
"app.services.ai.llm_service.vertexai.init"
) as vertex_init:
service = LLMService()
vertex_init.assert_called_once_with(project="test-project", location="us-central1")
self.assertEqual(service.model_names[0], "gemini-atendimento")
self.assertEqual(service.bundle_model_names[0], "gemini-atendimento-bundle")
# Built-in fallback model remains available alongside the configured one.
self.assertIn("gemini-2.5-pro", service.model_names)
def test_constructor_falls_back_to_legacy_vertex_runtime_model_names(self):
# Without atendimento-specific settings the legacy vertex_* names apply.
runtime_settings = Settings(
google_project_id="test-project",
google_location="us-central1",
vertex_model_name="legacy-runtime",
vertex_bundle_model_name="legacy-bundle",
)
with patch("app.services.ai.llm_service.settings", runtime_settings), patch(
"app.services.ai.llm_service.vertexai.init"
):
service = LLMService()
self.assertEqual(service.model_names[0], "legacy-runtime")
self.assertEqual(service.bundle_model_names[0], "legacy-bundle")
class LLMServiceImageWorkflowPromptTests(unittest.TestCase):
def test_build_image_workflow_prompt_preserves_visible_payment_time(self):
service = LLMService.__new__(LLMService)
@ -122,6 +175,7 @@ class LLMServiceImageWorkflowPromptTests(unittest.TestCase):
"Registrar pagamento de aluguel: contrato LOC-20260319-33CD6567; valor R$ 379,80.",
)
class LLMServiceDispatchTests(unittest.IsolatedAsyncioTestCase):
async def test_generate_response_uses_generate_content_when_history_is_empty(self):
service = LLMService.__new__(LLMService)
@ -203,5 +257,4 @@ class LLMServiceDispatchTests(unittest.IsolatedAsyncioTestCase):
self.assertEqual(payload, {"response": "ok", "tool_call": None})
self.assertEqual(model.histories, [history])
self.assertEqual(model.chat.calls, [("teste", {})])
self.assertEqual(model.chat.calls, [("teste", {})])

@ -1,3 +1,4 @@
import json
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
@ -54,12 +55,26 @@ class BootstrapRuntimeTests(unittest.TestCase):
mock_create_all.assert_called_once()
def test_ensure_generated_tools_runtime_package_creates_package_files(self):
    """Bootstrap must create the generated-tools package and an empty manifest.

    Patches the project root to a throwaway sandbox directory so the real
    repository tree is never touched, then verifies the package directory,
    its __init__.py, and a manifest seeded with no publications.

    Fix: the previous text carried leftover lines from the earlier
    TemporaryDirectory-based version (an orphaned `with TemporaryDirectory()`
    / `patch.object(..., Path(temp_dir))` pair referencing an undefined
    `temp_dir`); those stale lines are removed and only the sandbox-based
    flow is kept.
    """
    import shutil
    sandbox_root = Path.cwd() / ".tmp_test_runtime_bootstrap"
    shutil.rmtree(sandbox_root, ignore_errors=True)  # clear residue from aborted runs
    sandbox_root.mkdir(parents=True, exist_ok=True)
    try:
        with patch.object(bootstrap_module, "_PROJECT_ROOT", sandbox_root):
            package_dir = bootstrap_module._ensure_generated_tools_runtime_package()
        self.assertEqual(package_dir.name, "generated_tools")
        self.assertTrue(package_dir.exists())
        self.assertTrue((package_dir / "__init__.py").exists())
        manifest_path = package_dir / "published_runtime_tools.json"
        self.assertTrue(manifest_path.exists())
        manifest = json.loads(manifest_path.read_text(encoding="utf-8"))
        self.assertEqual(manifest["target_service"], "product")
        # A freshly bootstrapped manifest starts with no publications.
        self.assertEqual(manifest["publications"], [])
    finally:
        shutil.rmtree(sandbox_root, ignore_errors=True)
@patch.object(bootstrap_module, "seed_tools")
@patch.object(bootstrap_module, "seed_mock_data")
@ -128,3 +143,4 @@ class HttpStartupTests(unittest.IsolatedAsyncioTestCase):
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
unittest.main()

@ -1,4 +1,4 @@
import os
import os
import unittest
from types import SimpleNamespace
from unittest.mock import AsyncMock, patch

Loading…
Cancel
Save