# =============================================================================
# GOXY ML/AI Service - Environment Variables Template
# =============================================================================
# Copy this file to .env and fill in your actual values
# NEVER commit .env file with real credentials to git!
# =============================================================================

# =============================================================================
# APPLICATION SETTINGS
# =============================================================================
APP_NAME=goxy-ml-service
APP_VERSION=0.1.0
APP_ENV=production
DEBUG=false
LOG_LEVEL=INFO

# =============================================================================
# SERVER CONFIGURATION
# =============================================================================
HOST=0.0.0.0
PORT=7860
WORKERS=2
RELOAD=false

# =============================================================================
# DATABASE CONFIGURATION
# =============================================================================
# For Hugging Face Spaces, use external PostgreSQL (Supabase, Neon, etc.)
# Example for Supabase: postgresql+asyncpg://postgres:[PASSWORD]@db.[PROJECT-REF].supabase.co:5432/postgres
# Example for Neon: postgresql+asyncpg://[USERNAME]:[PASSWORD]@[HOST]/[DATABASE]?sslmode=require
DATABASE_URL=postgresql+asyncpg://goxy_user:goxy_password@localhost:5432/goxy_ml_db
DB_POOL_SIZE=5
DB_MAX_OVERFLOW=10
DB_POOL_TIMEOUT=30
DB_POOL_RECYCLE=3600
DB_ECHO=false

# =============================================================================
# REDIS CONFIGURATION (Optional - use Upstash for free tier)
# =============================================================================
# Example for Upstash: rediss://default:[PASSWORD]@[HOST]:6379
REDIS_URL=redis://localhost:6379/0
REDIS_MAX_CONNECTIONS=5
REDIS_SOCKET_TIMEOUT=5
REDIS_SOCKET_CONNECT_TIMEOUT=5
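
# Optional sanity checks for the defaults above (a sketch, assuming psql and
# redis-cli are installed locally; drop the "+asyncpg" driver suffix when
# connecting with psql):
#   psql "postgresql://goxy_user:goxy_password@localhost:5432/goxy_ml_db" -c "SELECT 1;"
#   redis-cli -u redis://localhost:6379/0 ping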

# =============================================================================
# SECURITY SETTINGS
# =============================================================================
# IMPORTANT: Generate a strong secret key!
# Use: openssl rand -hex 32
SECRET_KEY=your-super-secret-key-change-this-in-production-min-32-chars
JWT_ALGORITHM=HS256
JWT_EXPIRATION=3600
JWT_REFRESH_EXPIRATION=604800
API_KEY_HEADER=X-API-Key

# =============================================================================
# CORS SETTINGS
# =============================================================================
# Add your frontend URLs here
CORS_ORIGINS=["https://yourdomain.com","http://localhost:3000"]
CORS_CREDENTIALS=true
CORS_METHODS=["*"]
CORS_HEADERS=["*"]

# =============================================================================
# ML MODEL CONFIGURATION
# =============================================================================
# LLM Provider: 'grok' for xAI API, 'hf' for local HuggingFace models
LLM_PROVIDER=hf
LLM_MODEL_NAME=gpt2
MODEL_NAME=gpt2
MODEL_PATH=./data/models/

# For Moderation
MODERATION_MODEL_NAME=unitary/toxic-bert

# Model Settings
MAX_LENGTH=100
TEMPERATURE=0.7
TOP_P=0.9
TOP_K=50
DEVICE=cpu
BATCH_SIZE=1
USE_CACHE=true
ENABLE_STREAMING=false

# =============================================================================
# xAI GROK API CONFIGURATION (if using LLM_PROVIDER=grok)
# =============================================================================
# Get your API key from: https://console.x.ai/
XAI_API_KEY=your-xai-api-key-here
XAI_API_BASE_URL=https://api.x.ai/v1
XAI_MODEL=grok-beta
XAI_MAX_TOKENS=500
XAI_TIMEOUT=30

# =============================================================================
# RATE LIMITING
# =============================================================================
RATE_LIMIT_ENABLED=true
RATE_LIMIT_PER_MINUTE=60
RATE_LIMIT_BURST=10

# =============================================================================
# MONITORING & METRICS
# =============================================================================
METRICS_ENABLED=true
METRICS_PORT=9090
HEALTH_CHECK_ENABLED=true

# =============================================================================
# FILE UPLOAD SETTINGS
# =============================================================================
MAX_UPLOAD_SIZE=10485760
ALLOWED_EXTENSIONS=[".txt",".csv",".json",".jsonl"]
UPLOAD_DIR=./data/uploads/

# =============================================================================
# DATASET SETTINGS
# =============================================================================
DATASET_DIR=./data/datasets/
DATASET_MAX_SIZE=1000000
DATASET_MIN_QUALITY_SCORE=0.7

# =============================================================================
# TRAINING SETTINGS
# =============================================================================
TRAINING_ENABLED=true
TRAINING_BATCH_SIZE=8
TRAINING_EPOCHS=3
TRAINING_LEARNING_RATE=0.00005
TRAINING_CHECKPOINT_DIR=./data/models/checkpoints/
TRAINING_LOG_DIR=./logs/training/

# =============================================================================
# CACHE SETTINGS
# =============================================================================
CACHE_ENABLED=true
CACHE_TTL=3600
CACHE_MAX_SIZE=1000
CACHE_DIR=./data/cache/

# =============================================================================
# LOGGING SETTINGS
# =============================================================================
LOG_DIR=./logs/
LOG_FILE=app.log
LOG_ROTATION=daily
LOG_RETENTION=30
LOG_FORMAT=json
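
# Running locally (a sketch; "app.main:app" is a placeholder for the project's
# actual ASGI entrypoint, and this template is assumed to be saved as .env.example):
#   cp .env.example .env
#   uvicorn app.main:app --host 0.0.0.0 --port 7860 --workers 2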

# =============================================================================
# HUGGING FACE SPACES SPECIFIC
# =============================================================================
# Hugging Face Spaces uses port 7860 by default
# Set these in Spaces settings, not in .env
# SPACE_ID=your-username/your-space-name
# HF_TOKEN=your-huggingface-token (if needed for private models)
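
# To set Space secrets from the command line instead of the web UI (a sketch,
# assuming a recent huggingface_hub release that provides HfApi.add_space_secret;
# the Space id and token below are placeholders):
#   python -c "from huggingface_hub import HfApi; HfApi().add_space_secret(repo_id='your-username/your-space-name', key='HF_TOKEN', value='<token>')"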