diff --git a/app/api/routes.py b/app/api/routes.py
index 2004e6b..e9186ba 100644
--- a/app/api/routes.py
+++ b/app/api/routes.py
@@ -32,7 +32,8 @@ def get_db():
     finally:
         db.close()
 
-
+'''
+# Temporarily removed for Vertex AI testing
 @router.post("/chat", response_model=ChatResponse)
 async def chat(request: ChatRequest, db: Session = Depends(get_db)):
     service = OrquestradorService(db)
@@ -41,6 +42,16 @@ async def chat(request: ChatRequest, db: Session = Depends(get_db)):
         user_id=request.user_id,
     )
     return ChatResponse(response=result)
+'''
+@router.post("/chat", response_model=ChatResponse)
+async def chat(request: ChatRequest, db: Session = Depends(get_db)):
+    service = OrquestradorService(db)
+
+    result = await service.handle_message(
+        message=request.message
+    )
+
+    return ChatResponse(response=result)
 
 
 @router.post("/mock/consultar-estoque")
diff --git a/app/api/schemas.py b/app/api/schemas.py
index b518003..05ef34d 100644
--- a/app/api/schemas.py
+++ b/app/api/schemas.py
@@ -3,7 +3,7 @@ from typing import Dict, Any, Optional
 
 class ChatRequest(BaseModel):
     message: str
-    user_id: str
+    # user_id: str -> Temporarily removed for Vertex AI testing
 
 class ChatResponse(BaseModel):
     response: str
diff --git a/app/core/settings.py b/app/core/settings.py
index 8b027b0..17c7341 100644
--- a/app/core/settings.py
+++ b/app/core/settings.py
@@ -14,10 +14,16 @@ class Settings(BaseSettings):
     mockaroo_api_key: str
     mockaroo_base_url: str = "https://api.mockaroo.com/api"
     use_mockaroo_writes: bool = False
-
+
+    environment: str = "production"
+    debug: bool = False
+
+    # Cloud SQL
+    cloud_sql_connection_name: str | None = None
 
     class Config:
         env_file = ".env"
+        extra = "ignore"
 
 
 settings = Settings()
diff --git a/app/db/database.py b/app/db/database.py
index 04fe404..9326567 100644
--- a/app/db/database.py
+++ b/app/db/database.py
@@ -2,15 +2,31 @@ from sqlalchemy import create_engine
 from sqlalchemy.orm import sessionmaker, declarative_base
 from app.core.settings import settings
 
-DATABASE_URL = (
-    f"postgresql+psycopg2://{settings.db_user}:"
-    f"{settings.db_password}@"
-    f"{settings.db_host}:"
-    f"{settings.db_port}/"
-    f"{settings.db_name}"
-)
+if settings.cloud_sql_connection_name:
+    # Cloud Run
+    DATABASE_URL = (
+        f"postgresql+psycopg2://{settings.db_user}:"
+        f"{settings.db_password}@/"
+        f"{settings.db_name}"
+        f"?host=/cloudsql/{settings.cloud_sql_connection_name}"
+    )
+else:
+    # Local environment
+    DATABASE_URL = (
+        f"postgresql+psycopg2://{settings.db_user}:"
+        f"{settings.db_password}@"
+        f"{settings.db_host}:"
+        f"{settings.db_port}/"
+        f"{settings.db_name}"
+    )
 
-engine = create_engine(DATABASE_URL, echo=True)
+engine = create_engine(
+    DATABASE_URL,
+    echo=True,
+    pool_pre_ping=True,
+    pool_recycle=3600,
+    connect_args={"connect_timeout": 10}
+)
 
 SessionLocal = sessionmaker(
     autocommit=False,
diff --git a/app/main.py b/app/main.py
index 674756d..25175ea 100644
--- a/app/main.py
+++ b/app/main.py
@@ -2,6 +2,8 @@ from fastapi import FastAPI
 from app.api.routes import router
 from app.api.tool_routes import router as tool_router
 from app.db.database import Base, engine
+# 👇 IMPORTANT: register the models in the metadata
+from app.db.models import Tool
 
 app = FastAPI(title="AI Orquestrador")
 
@@ -12,17 +14,18 @@ app.include_router(tool_router)
 @app.on_event("startup")
 def startup_event():
     """
-    Initialize the database on first run:
-    - Create all tables
-    - Seed the initial tools
+    Initialize the database and run the seeds if necessary.
""" try: - # Cria as tabelas se não existirem + print("🚀 Inicializando banco de dados...") + Base.metadata.create_all(bind=engine) - - # Seed das tools + from app.db.tool_seed import seed_tools seed_tools() + + print("✅ Banco inicializado com sucesso.") + except Exception as e: - print(f"⚠️ Erro ao inicializar banco de dados: {e}") - # Não falha a aplicação, apenas registra o erro + print(f"❌ Erro ao inicializar banco: {e}") + raise e \ No newline at end of file diff --git a/app/services/llm_service.py b/app/services/llm_service.py index 26bfc84..3d83d8e 100644 --- a/app/services/llm_service.py +++ b/app/services/llm_service.py @@ -13,7 +13,7 @@ class LLMService: location=settings.google_location ) - self.model = GenerativeModel("gemini-1.5-pro") + self.model = GenerativeModel("gemini-1.5-flash") def build_vertex_tools(self, tools: List[ToolDefinition]): # Converte as Tools internas (ToolDefinition) para o formato que o Vertex AI entende. @@ -56,11 +56,13 @@ class LLMService: # - histórico (se existir) # - ferramentas disponíveis chat = self.model.start_chat( - history=history or [], - tools=vertex_tools + history=history or [] ) - response = chat.send_message(message) + response = chat.send_message( + message, + tools=vertex_tools + ) # Pegamos a primeira resposta candidata do modelo (a com maior coerência com o assunto) # Estrutura interna: diff --git a/app/services/orquestrador_service.py b/app/services/orquestrador_service.py index a315080..8674cf9 100644 --- a/app/services/orquestrador_service.py +++ b/app/services/orquestrador_service.py @@ -19,7 +19,7 @@ class OrquestradorService: - user_id: identificador do usuário (ainda não está sendo usado aqui, mas futuramente servirá para histórico) """ - async def handle_message(self, message: str, user_id: str) -> str: + async def handle_message(self, message: str) -> str: tools = self.registry.get_tools()