# Docker Compose stack for the Arthur support agent:
#   app (FastAPI) -> postgres (audit logs) + qdrant (RAG vectors) + ollama (local LLMs)
#   ollama-init    one-shot model puller
#   langfuse       AI tracing UI (shares the postgres instance)
services:
  # Arthur Agent API
  app:
    build: .
    container_name: arthur_agent
    ports:
      - "8000:8000"
    volumes:
      # Bind-mount the source tree so --reload picks up local edits.
      - .:/app
    environment:
      - PYTHONUNBUFFERED=1
      - POSTGRES_HOST=postgres
      - QDRANT_HOST=qdrant
      - OLLAMA_BASE_URL=http://ollama:11434
    env_file:
      - .env
    depends_on:
      postgres:
        condition: service_healthy
      qdrant:
        condition: service_started
      # App talks to Ollama via OLLAMA_BASE_URL, so gate startup on it too.
      ollama:
        condition: service_started
    networks:
      - arthur_net
    command: uvicorn src.main:app --host 0.0.0.0 --port 8000 --reload

  # PostgreSQL Database (Audit Logs)
  postgres:
    image: postgres:16-alpine
    container_name: arthur_postgres
    ports:
      - "5432:5432"
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-arthur}
      # NOTE(review): default password is committed to VCS — override it via
      # .env in any non-local environment.
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-Arth#Sup0rt3_2026!xK9}
      POSTGRES_DB: ${POSTGRES_DB:-arthur_db}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    networks:
      - arthur_net
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-arthur}"]
      interval: 5s
      timeout: 5s
      retries: 5

  # Vector Database (RAG)
  qdrant:
    image: qdrant/qdrant:latest
    container_name: arthur_qdrant
    ports:
      - "6333:6333"
      - "6334:6334"  # gRPC port
    volumes:
      - qdrant_data:/qdrant/storage
    environment:
      # Env values are strings; quote to avoid implicit-int typing.
      QDRANT__SERVICE__GRPC_PORT: "6334"
    networks:
      - arthur_net

  # Ollama - Local LLM Server
  ollama:
    image: ollama/ollama:latest
    container_name: arthur_ollama
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    networks:
      - arthur_net
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]
    # Remove deploy section if no GPU is available or comment out above lines

  # Ollama Model Puller (Ephemeral) — polls until Ollama is up, then pulls
  # the two models the agent uses and exits.
  ollama-init:
    image: curlimages/curl:latest
    container_name: arthur_ollama_init
    depends_on:
      - ollama
    networks:
      - arthur_net
    entrypoint: >
      /bin/sh -c "
      echo 'Waiting for Ollama to start...';
      while ! curl -s http://ollama:11434/api/tags > /dev/null; do sleep 2; done;
      echo 'Pulling Llama 3.2 1B (Triage)...';
      curl -X POST http://ollama:11434/api/pull -d '{\"name\": \"llama3.2:1b\"}';
      echo 'Pulling Llama 3.1 8B (Specialist)...';
      curl -X POST http://ollama:11434/api/pull -d '{\"name\": \"llama3.1:8b\"}';
      echo 'Models pulled successfully!';
      "

  # Langfuse - AI Tracing
  langfuse:
    image: langfuse/langfuse:2
    container_name: arthur_langfuse
    ports:
      - "3000:3000"
    environment:
      # NOTE(review): the password here is a hardcoded, URL-encoded copy of the
      # default POSTGRES_PASSWORD above (Compose cannot URL-encode a variable).
      # If POSTGRES_PASSWORD is overridden via .env, this URL must be updated
      # to match or Langfuse will fail to authenticate.
      # NOTE(review): this points at a `langfuse` database, but the postgres
      # service only creates ${POSTGRES_DB:-arthur_db} — the `langfuse` DB must
      # be created separately (e.g. an init script in
      # /docker-entrypoint-initdb.d) or Langfuse will fail to start. Verify.
      DATABASE_URL: postgresql://${POSTGRES_USER:-arthur}:Arth%23Sup0rt3_2026%21xK9@postgres:5432/langfuse
      # NOTE(review): default secret/salt are committed to VCS — override via
      # LANGFUSE_SECRET / LANGFUSE_SALT in .env outside local dev.
      NEXTAUTH_SECRET: ${LANGFUSE_SECRET:-ArthurLangfuseSecret2026}
      NEXTAUTH_URL: http://localhost:3000
      SALT: ${LANGFUSE_SALT:-ArthurSalt2026Random}
    depends_on:
      postgres:
        condition: service_healthy
    networks:
      - arthur_net

volumes:
  postgres_data:
  qdrant_data:
  ollama_data:

networks:
  arthur_net:
    driver: bridge