From cb51186830d9a1913ef9ee62dc18d9bf22f13495 Mon Sep 17 00:00:00 2001
From: mohamad
Date: Wed, 28 May 2025 08:23:22 +0200
Subject: [PATCH] feat: Add production deployment configuration and environment setup

- Introduced `docker-compose.prod.yml` to define services for production deployment, including PostgreSQL, FastAPI backend, frontend, and Redis.
- Created `env.production.template` to outline the environment variables needed in production, ensuring sensitive data is not committed.
- Added `PRODUCTION.md` as a deployment guide detailing the setup process using Docker Compose and Gitea Actions for CI/CD.
- Implemented Gitea workflows for build, test, and deployment to streamline production updates.
- Updated backend and frontend Dockerfiles for optimized production builds and configurations.
- Enhanced application settings to support environment-specific configuration, including CORS and health checks.
---
 .gitea/workflows/build-test.yml  |  71 +++++++++++
 .gitea/workflows/deploy-prod.yml |  78 ++++++++++++
 PRODUCTION.md                    | 196 +++++++++++++++++++++++++++++++
 be/Dockerfile.prod               |  64 ++++++++++
 be/app/config.py                 |  68 +++++++++--
 be/app/main.py                   |  57 ++++++---
 docker-compose.prod.yml          | 110 +++++++++++++++++
 env.production.template          |  46 ++++++++
 fe/Dockerfile.prod               |  68 +++++++++++
 9 files changed, 731 insertions(+), 27 deletions(-)
 create mode 100644 .gitea/workflows/build-test.yml
 create mode 100644 .gitea/workflows/deploy-prod.yml
 create mode 100644 PRODUCTION.md
 create mode 100644 be/Dockerfile.prod
 create mode 100644 docker-compose.prod.yml
 create mode 100644 env.production.template
 create mode 100644 fe/Dockerfile.prod

diff --git a/.gitea/workflows/build-test.yml b/.gitea/workflows/build-test.yml
new file mode 100644
index 0000000..5ad911b
--- /dev/null
+++ b/.gitea/workflows/build-test.yml
@@ -0,0 +1,71 @@
+name: Build and Test
+
+on:
+  push:
+    branches:
+      - main
+      - develop
+  pull_request:
+    branches:
+      - main
+      - develop
+
+jobs:
+  build-and-test:
+    runs-on: ubuntu-latest
+    services:
+      postgres:
+        image: postgres:17-alpine
+        env:
+          POSTGRES_USER: testuser
+          POSTGRES_PASSWORD: testpassword
+          POSTGRES_DB: testdb
+        ports:
+          - 5432:5432
+        options: >-
+          --health-cmd pg_isready
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v3
+        with:
+          python-version: '3.11'
+
+      - name: Set up Node.js
+        uses: actions/setup-node@v3
+        with:
+          node-version: '24'
+
+      - name: Install backend dependencies
+        working-directory: ./be
+        run: |
+          pip install --upgrade pip
+          pip install -r requirements.txt
+
+      - name: Install frontend dependencies
+        working-directory: ./fe
+        run: npm ci
+
+      - name: Run backend tests
+        working-directory: ./be
+        env:
+          DATABASE_URL: postgresql+asyncpg://testuser:testpassword@localhost:5432/testdb
+          SECRET_KEY: test-secret-key-for-ci-that-is-at-least-32-characters-long # config.py rejects keys shorter than 32 characters
+          GEMINI_API_KEY: testgeminikey # Mock or skip tests requiring this if not available
+          SESSION_SECRET_KEY: testsessionsecret
+        run: pytest
+
+      - name: Build frontend
+        working-directory: ./fe
+        run: npm run build
+
+      # Add a frontend test step here if you have one, e.g. npm test
npm test + # - name: Run frontend tests + # working-directory: ./fe + # run: npm test \ No newline at end of file diff --git a/.gitea/workflows/deploy-prod.yml b/.gitea/workflows/deploy-prod.yml new file mode 100644 index 0000000..95441f0 --- /dev/null +++ b/.gitea/workflows/deploy-prod.yml @@ -0,0 +1,78 @@ +name: Deploy to Production + +on: + push: + branches: + - main # Trigger deployment only on pushes to main + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Log in to Docker Hub (or your registry) + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + # For Gitea Container Registry, you might use: + # registry: your-gitea-instance.com:5000 + # username: ${{ gitea.actor }} + # password: ${{ secrets.GITEA_TOKEN }} + + - name: Build and push backend image + uses: docker/build-push-action@v4 + with: + context: ./be + file: ./be/Dockerfile.prod + push: true + tags: ${{ secrets.DOCKER_USERNAME }}/mitlist-backend:latest # Replace with your image name + # Gitea registry example: your-gitea-instance.com:5000/${{ gitea.repository_owner }}/${{ gitea.repository_name }}-backend:latest + + - name: Build and push frontend image + uses: docker/build-push-action@v4 + with: + context: ./fe + file: ./fe/Dockerfile.prod + push: true + tags: ${{ secrets.DOCKER_USERNAME }}/mitlist-frontend:latest # Replace with your image name + # Gitea registry example: your-gitea-instance.com:5000/${{ gitea.repository_owner }}/${{ gitea.repository_name }}-frontend:latest + build-args: | + VITE_API_URL=${{ secrets.VITE_API_URL }} + VITE_SENTRY_DSN=${{ secrets.VITE_SENTRY_DSN }} + + - name: Deploy to server + uses: appleboy/ssh-action@master + with: + host: ${{ secrets.SERVER_HOST }} + username: ${{ secrets.SERVER_USERNAME }} + key: ${{ secrets.SSH_PRIVATE_KEY }} + port: ${{ secrets.SERVER_PORT || 22 }} + script: | + cd /path/to/your/app # e.g., /srv/mitlist + echo "POSTGRES_USER=${{ secrets.POSTGRES_USER }}" > .env.production + echo "POSTGRES_PASSWORD=${{ secrets.POSTGRES_PASSWORD }}" >> .env.production + echo "POSTGRES_DB=${{ secrets.POSTGRES_DB }}" >> .env.production + echo "DATABASE_URL=${{ secrets.DATABASE_URL }}" >> .env.production + echo "SECRET_KEY=${{ secrets.SECRET_KEY }}" >> .env.production + echo "SESSION_SECRET_KEY=${{ secrets.SESSION_SECRET_KEY }}" >> .env.production + echo "GEMINI_API_KEY=${{ secrets.GEMINI_API_KEY }}" >> .env.production + echo "REDIS_PASSWORD=${{ secrets.REDIS_PASSWORD }}" >> .env.production + echo "SENTRY_DSN=${{ secrets.SENTRY_DSN }}" >> .env.production + echo "CORS_ORIGINS=${{ secrets.CORS_ORIGINS }}" >> .env.production + echo "FRONTEND_URL=${{ secrets.FRONTEND_URL }}" >> .env.production + echo "VITE_API_URL=${{ secrets.VITE_API_URL }}" >> .env.production + echo "VITE_SENTRY_DSN=${{ secrets.VITE_SENTRY_DSN }}" >> .env.production + echo "ENVIRONMENT=production" >> .env.production + echo "LOG_LEVEL=INFO" >> .env.production + + # Ensure docker-compose.prod.yml is present on the server or copy it + # git pull # If repo is cloned on server + docker-compose -f docker-compose.prod.yml pull + docker-compose -f docker-compose.prod.yml up -d --remove-orphans + docker image prune -af \ No newline at end of file diff --git a/PRODUCTION.md b/PRODUCTION.md new file mode 100644 index 0000000..54212a7 --- /dev/null +++ b/PRODUCTION.md @@ -0,0 +1,196 @@ +# Production Deployment Guide (Gitea Actions) 
+
+This guide covers deploying the mitlist application to a production environment using Docker Compose and Gitea Actions for CI/CD.
+
+## 🚀 Quick Start
+
+1. **Clone the repository** (if not already done):
+   ```bash
+   git clone <repository-url>
+   cd mitlist
+   ```
+
+2. **Configure Gitea Secrets**:
+   In your Gitea repository settings, go to "Secrets" and add the following secrets. These will be used by the `deploy-prod.yml` workflow (see the note after this list on generating the key-type secrets).
+
+   * `DOCKER_USERNAME`: Your Docker Hub username (or username for your container registry).
+   * `DOCKER_PASSWORD`: Your Docker Hub password (or token for your container registry).
+   * `SERVER_HOST`: IP address or hostname of your production server.
+   * `SERVER_USERNAME`: Username for SSH access to your production server.
+   * `SSH_PRIVATE_KEY`: Your private SSH key for accessing the production server.
+   * `SERVER_PORT`: (Optional) SSH port for your server (defaults to 22).
+   * `POSTGRES_USER`: Production database username.
+   * `POSTGRES_PASSWORD`: Production database password.
+   * `POSTGRES_DB`: Production database name.
+   * `DATABASE_URL`: Production database connection string.
+   * `SECRET_KEY`: FastAPI application secret key.
+   * `SESSION_SECRET_KEY`: FastAPI session secret key.
+   * `GEMINI_API_KEY`: API key for Gemini.
+   * `REDIS_PASSWORD`: Password for Redis.
+   * `SENTRY_DSN`: (Optional) Sentry DSN for backend error tracking.
+   * `CORS_ORIGINS`: Comma-separated list of allowed CORS origins for production (e.g., `https://yourdomain.com`).
+   * `FRONTEND_URL`: The public URL of your frontend (e.g., `https://yourdomain.com`).
+   * `VITE_API_URL`: The public API URL for the frontend (e.g., `https://yourdomain.com/api`).
+   * `VITE_SENTRY_DSN`: (Optional) Sentry DSN for frontend error tracking.
+
+3. **Prepare your Production Server**:
+   * Install Docker and Docker Compose (see the Prerequisites section below).
+   * Ensure your server can be accessed via SSH using the key you added to Gitea secrets.
+   * Create the deployment directory on your server (e.g., `/srv/mitlist`).
+   * Copy the `docker-compose.prod.yml` file to this directory on your server.
+
+4. **Push to the `main` branch**:
+   Once the Gitea workflows (`.gitea/workflows/build-test.yml` and `.gitea/workflows/deploy-prod.yml`) are in your repository, pushing to the `main` branch will automatically trigger the deployment workflow.
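+
+**Generating key-type secrets**: For `SECRET_KEY`, `SESSION_SECRET_KEY`, and `REDIS_PASSWORD`, any sufficiently long random string works (the backend requires `SECRET_KEY` to be at least 32 characters). A minimal sketch using `openssl`, as suggested in `env.production.template`; any other secure generator is fine:
+
+```bash
+# Generate three independent 64-character hex strings and paste them into Gitea secrets
+openssl rand -hex 32   # SECRET_KEY
+openssl rand -hex 32   # SESSION_SECRET_KEY
+openssl rand -hex 32   # REDIS_PASSWORD
+```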
+
+## 📋 Prerequisites (Server Setup)
+
+### System Requirements
+- **OS**: Ubuntu 20.04+ / CentOS 8+ / Debian 11+
+- **RAM**: Minimum 2GB, Recommended 4GB+
+- **Storage**: Minimum 20GB free space
+- **CPU**: 2+ cores recommended
+
+### Software Dependencies (on Production Server)
+- Docker 20.10+
+- Docker Compose 2.0+
+
+### Installation Commands
+
+**Ubuntu/Debian:**
+```bash
+# Update system
+sudo apt update && sudo apt upgrade -y
+
+# Install Docker
+curl -fsSL https://get.docker.com -o get-docker.sh
+sudo sh get-docker.sh
+sudo usermod -aG docker $USER # Add your deployment user to the docker group
+
+# Install Docker Compose
+sudo apt install docker-compose-plugin
+
+# Reboot or log out/in to apply group changes
+# sudo reboot
+```
+
+**CentOS/RHEL:**
+```bash
+# Update system
+sudo yum update -y
+
+# Install Docker
+sudo yum install -y yum-utils
+sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+sudo yum install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl start docker
+sudo systemctl enable docker
+sudo usermod -aG docker $USER # Add your deployment user to the docker group
+
+# Reboot or log out/in to apply group changes
+# sudo reboot
+```
+
+## 🔧 Configuration Overview
+
+* **`docker-compose.prod.yml`**: Defines the production services (database, backend, frontend, redis). This file needs to be on your production server in the deployment directory.
+* **`.gitea/workflows/build-test.yml`**: Gitea workflow that builds and runs tests on every push to `main` or `develop`, and on pull requests to these branches.
+* **`.gitea/workflows/deploy-prod.yml`**: Gitea workflow that triggers on pushes to the `main` branch. It builds and pushes Docker images to your container registry and then SSHes into your production server to update environment variables and restart services using `docker-compose`.
+* **`env.production.template`**: A template file showing the environment variables needed. These are now set directly in the Gitea deployment workflow via secrets.
+
+## 🚀 Deployment Process (via Gitea Actions)
+
+1. **Code Push**: Developer pushes code to the `main` branch.
+2. **Build & Test Workflow**: (Optional, if you keep `build-test.yml` active on `main` as well) The `build-test.yml` workflow runs, ensuring code quality.
+3. **Deploy Workflow Trigger**: The `deploy-prod.yml` workflow is triggered.
+4. **Checkout Code**: The workflow checks out the latest code.
+5. **Login to Registry**: Logs into your specified Docker container registry.
+6. **Build & Push Images**: Builds the production Docker images for the backend and frontend and pushes them to the registry.
+7. **SSH to Server**: Connects to your production server via SSH.
+8. **Set Environment Variables**: Creates/updates the `.env.production` file on the server using the Gitea secrets.
+9. **Pull New Images**: Runs `docker-compose pull` to fetch the newly pushed images.
+10. **Restart Services**: Runs `docker-compose up -d` to restart the services with the new images and configuration.
+11. **Prune Images**: Cleans up old, unused Docker images on the server (a quick post-deployment check is sketched below).
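+
+After a workflow run finishes, a quick sanity check on the server can confirm the rollout. This is a sketch: the deployment path `/srv/mitlist` follows the examples above, and the `exec` step relies on `curl` being installed in the backend image (it is, via `be/Dockerfile.prod`):
+
+```bash
+cd /srv/mitlist
+# All services should show "Up" (and "healthy" where a healthcheck is defined)
+docker-compose -f docker-compose.prod.yml ps
+# Recent backend logs
+docker-compose -f docker-compose.prod.yml logs --tail=50 backend
+# Hit the backend health endpoint from inside its container
+docker-compose -f docker-compose.prod.yml exec backend curl -fsS http://localhost:8000/health
+# The frontend is published on port 80 of the host
+curl -I http://localhost/
+```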
+
+## 🏗️ Simplified Architecture
+
+With the removal of nginx as a reverse proxy, the architecture is simpler:
+
+```
+[ User / Internet ]
+        |
+        v
+[ Frontend Service (Port 80) ]  <-- Serves the Vue.js app (e.g., via `serve`)
+        |
+        v  (API calls)
+[ Backend Service (Internal Port 8000) ]  <-- FastAPI
+        |              |
+        v              v
+[ PostgreSQL ]     [ Redis ]
+ (Database)         (Cache)
+```
+
+* The **Frontend** service now directly exposes port 80 (or another port you configure) to the internet.
+* The **Backend** service is still internal and accessed by the frontend via its Docker network name (`backend:8000`).
+
+**Note on SSL/HTTPS**: Since nginx is removed, SSL termination is not handled by this setup. You would typically handle SSL at a higher level, for example:
+  * Using a cloud provider's load balancer with SSL termination.
+  * Placing another reverse proxy (like Caddy, Traefik, or a dedicated nginx instance) in front of your Docker setup on the server, configured for SSL.
+  * Using services like Cloudflare that can provide SSL for your domain.
+
+## 📊 Monitoring & Logging
+
+### Health Checks
+* **Backend**: `http://<your-server>:8000/health` — only reachable from outside if you map the backend port in `docker-compose.prod.yml` (or proxy it through the frontend); inside the Docker network it is `http://backend:8000/health`.
+* **Frontend**: The `serve` package used by the frontend doesn't have a dedicated health check endpoint by default. You can check whether the main page loads.
+
+### Log Access
+```bash
+# On your production server, in the deployment directory
+docker-compose -f docker-compose.prod.yml logs -f
+
+# Specific service logs
+docker-compose -f docker-compose.prod.yml logs -f backend
+docker-compose -f docker-compose.prod.yml logs -f frontend
+```
+
+## 🔄 Maintenance
+
+### Database Backups
+Manual backups can still be performed on the server:
+```bash
+# Ensure your .env.production file is sourced or the vars are available
+docker exec postgres_db_prod pg_dump -U $POSTGRES_USER $POSTGRES_DB > backup-$(date +%Y%m%d).sql
+```
+Consider automating this with a cron job on your server (see the example at the end of this guide).
+
+### Updates
+Updates are now handled by pushing to the `main` branch, which triggers the Gitea deployment workflow.
+
+## 🐛 Troubleshooting
+
+### Gitea Workflow Failures
+* Check the Gitea Actions logs for the specific workflow run to identify errors.
+* Ensure all secrets are correctly configured in Gitea.
+* Verify Docker Hub/registry credentials.
+* Check SSH connectivity to your server from the Gitea runner (if using self-hosted runners, ensure network access).
+
+### Service Not Starting on Server
+* SSH into your server.
+* Navigate to your deployment directory (e.g., `/srv/mitlist`).
+* Check logs: `docker-compose -f docker-compose.prod.yml logs <service-name>`
+* Ensure `.env.production` has the correct values.
+* Check `docker ps` to see the running containers.
+
+### Frontend Not Accessible
+* Verify the frontend service is running (`docker ps`).
+* Check frontend logs: `docker-compose -f docker-compose.prod.yml logs frontend`.
+* Ensure the port mapping in `docker-compose.prod.yml` for the frontend service (e.g., `80:3000`) is correct and not blocked by a firewall on your server.
+
+## 📝 Changelog
+
+### v1.1.0 (Gitea Actions Deployment)
+- Removed nginx reverse proxy and related shell scripts.
+- Frontend now served directly using `serve`.
+- Added Gitea Actions workflows for CI (build/test) and CD (deploy to production).
+- Updated deployment documentation to reflect the Gitea Actions strategy.
+- Simplified `docker-compose.prod.yml`.
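+
+## 📎 Appendix: Automated Backup Example
+
+A sketch of the cron-based backup automation mentioned under Maintenance. It assumes the deployment lives in `/srv/mitlist` and that a `backups/` directory exists there; the database credentials are read from the environment already present inside the `postgres_db_prod` container, so nothing needs to be sourced on the host. Adjust paths and retention to your setup.
+
+```bash
+#!/usr/bin/env bash
+# /srv/mitlist/backup.sh
+set -euo pipefail
+cd /srv/mitlist
+
+# Dump the database using the credentials already set in the container
+docker exec postgres_db_prod sh -c 'pg_dump -U "$POSTGRES_USER" "$POSTGRES_DB"' \
+  > "backups/backup-$(date +%Y%m%d).sql"
+
+# Keep roughly two weeks of daily backups
+find backups/ -name 'backup-*.sql' -mtime +14 -delete
+
+# Example crontab entry (daily at 03:00):
+# 0 3 * * * /srv/mitlist/backup.sh >> /srv/mitlist/backups/backup.log 2>&1
+```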
\ No newline at end of file
diff --git a/be/Dockerfile.prod b/be/Dockerfile.prod
new file mode 100644
index 0000000..758027a
--- /dev/null
+++ b/be/Dockerfile.prod
@@ -0,0 +1,64 @@
+# Multi-stage build for production
+FROM python:3.11-slim as base
+
+# Set environment variables
+ENV PYTHONDONTWRITEBYTECODE=1 \
+    PYTHONUNBUFFERED=1 \
+    PYTHONHASHSEED=random \
+    PIP_NO_CACHE_DIR=1 \
+    PIP_DISABLE_PIP_VERSION_CHECK=1
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    gcc \
+    build-essential \
+    libpq-dev \
+    curl \
+    && rm -rf /var/lib/apt/lists/*
+
+# Create non-root user
+RUN groupadd -r appuser && useradd -r -g appuser appuser
+
+# Development stage
+FROM base as development
+WORKDIR /app
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+COPY . .
+RUN chown -R appuser:appuser /app
+USER appuser
+CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000", "--reload"]
+
+# Production stage
+FROM base as production
+WORKDIR /app
+
+# Install production dependencies
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy application code
+COPY . .
+
+# Create necessary directories and set permissions
+RUN mkdir -p /app/logs && \
+    chown -R appuser:appuser /app
+
+# Switch to non-root user
+USER appuser
+
+# Health check
+HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
+    CMD curl -f http://localhost:8000/health || exit 1
+
+# Expose port
+EXPOSE 8000
+
+# Production command with optimizations
+CMD ["uvicorn", "app.main:app", \
+    "--host", "0.0.0.0", \
+    "--port", "8000", \
+    "--workers", "4", \
+    "--access-log", \
+    "--log-level", "info"]
\ No newline at end of file
diff --git a/be/app/config.py b/be/app/config.py
index dbc8d71..4361114 100644
--- a/be/app/config.py
+++ b/be/app/config.py
@@ -14,6 +14,9 @@ class Settings(BaseSettings):
     GEMINI_API_KEY: str | None = None
     SENTRY_DSN: str | None = None # Sentry DSN for error tracking
 
+    # --- Environment Settings ---
+    ENVIRONMENT: str = "development"  # development, staging, production
+
     # --- JWT Settings --- (SECRET_KEY is used by FastAPI-Users)
     SECRET_KEY: str # Must be set via environment variable
     TOKEN_TYPE: str = "bearer" # Default token type for JWT authentication
@@ -66,15 +69,10 @@ Organic Bananas
     API_OPENAPI_URL: str = "/api/openapi.json"
     API_DOCS_URL: str = "/api/docs"
     API_REDOC_URL: str = "/api/redoc"
-    CORS_ORIGINS: list[str] = [
-        "http://localhost:5173",  # Frontend dev server
-        "http://localhost:5174",  # Alternative Vite port
-        "http://localhost:8000",  # Backend server
-        "http://127.0.0.1:5173",  # Frontend with IP
-        "http://127.0.0.1:5174",  # Alternative Vite with IP
-        "http://127.0.0.1:8000",  # Backend with IP
-    ]
-    FRONTEND_URL: str = "http://localhost:5173"  # URL for the frontend application
+
+    # CORS Origins - environment dependent
+    CORS_ORIGINS: str = "http://localhost:5173,http://localhost:5174,http://localhost:8000,http://127.0.0.1:5173,http://127.0.0.1:5174,http://127.0.0.1:8000"
+    FRONTEND_URL: str = "http://localhost:5173"  # URL for the frontend application
 
     # --- API Metadata ---
     API_TITLE: str = "Shared Lists API"
@@ -128,11 +126,46 @@ Organic Bananas
     # Session Settings
     SESSION_SECRET_KEY: str = "your-session-secret-key"  # Change this in production
     ACCESS_TOKEN_EXPIRE_MINUTES: int = 480  # 8 hours instead of 30 minutes
+
+    # Redis Settings
+    REDIS_URL: str = "redis://localhost:6379"
+    REDIS_PASSWORD: str = ""
+
     class
 Config:
         env_file = ".env"
         env_file_encoding = 'utf-8'
         extra = "ignore"
+    @property
+    def cors_origins_list(self) -> list[str]:
+        """Convert the CORS_ORIGINS string to a list"""
+        return [origin.strip() for origin in self.CORS_ORIGINS.split(",")]
+
+    @property
+    def is_production(self) -> bool:
+        """Check if running in the production environment"""
+        return self.ENVIRONMENT.lower() == "production"
+
+    @property
+    def is_development(self) -> bool:
+        """Check if running in the development environment"""
+        return self.ENVIRONMENT.lower() == "development"
+
+    @property
+    def docs_url(self) -> str | None:
+        """Return the docs URL only in development"""
+        return self.API_DOCS_URL if self.is_development else None
+
+    @property
+    def redoc_url(self) -> str | None:
+        """Return the redoc URL only in development"""
+        return self.API_REDOC_URL if self.is_development else None
+
+    @property
+    def openapi_url(self) -> str | None:
+        """Return the OpenAPI URL only in development"""
+        return self.API_OPENAPI_URL if self.is_development else None
+
 settings = Settings()
 
 # Validation for critical settings
@@ -147,8 +180,23 @@ if not settings.SECRET_KEY:
 if len(settings.SECRET_KEY) < 32:
     raise ValueError("SECRET_KEY must be at least 32 characters long for security")
 
+# Production-specific validations
+if settings.is_production:
+    if settings.SESSION_SECRET_KEY == "your-session-secret-key":
+        raise ValueError("SESSION_SECRET_KEY must be changed from its default value in production")
+
+    if not settings.SENTRY_DSN:
+        logger.warning("SENTRY_DSN not set in production environment. Error tracking will be unavailable.")
+
 if settings.GEMINI_API_KEY is None:
     logger.error("CRITICAL: GEMINI_API_KEY environment variable not set. Gemini features will be unavailable.")
 else:
     # Optional: Log partial key for confirmation (avoid logging full key)
-    logger.info(f"GEMINI_API_KEY loaded (starts with: {settings.GEMINI_API_KEY[:4]}...).")
\ No newline at end of file
+    logger.info(f"GEMINI_API_KEY loaded (starts with: {settings.GEMINI_API_KEY[:4]}...).")
+
+# Log environment information
+logger.info(f"Application starting in {settings.ENVIRONMENT} environment")
+if settings.is_production:
+    logger.info("Production mode: API documentation disabled")
+else:
+    logger.info(f"Development mode: API documentation available at {settings.API_DOCS_URL}")
\ No newline at end of file
diff --git a/be/app/main.py b/be/app/main.py
index 3365eb6..f9105e9 100644
--- a/be/app/main.py
+++ b/be/app/main.py
@@ -27,19 +27,19 @@ class RefreshResponse(BaseModel):
     refresh_token: str
     token_type: str = "bearer"
 
-# Initialize Sentry
-sentry_sdk.init(
-    dsn=settings.SENTRY_DSN,
-    integrations=[
-        FastApiIntegration(),
-    ],
-    # Set traces_sample_rate to 1.0 to capture 100% of transactions for performance monitoring.
-    # We recommend adjusting this value in production.
-    traces_sample_rate=1.0,
-    # If you wish to associate users to errors (assuming you are using
-    # FastAPI's users system) you may enable sending PII data.
-    send_default_pii=True
-)
+# Initialize Sentry only if a DSN is provided
+if settings.SENTRY_DSN:
+    sentry_sdk.init(
+        dsn=settings.SENTRY_DSN,
+        integrations=[
+            FastApiIntegration(),
+        ],
+        # Adjust traces_sample_rate for production
+        traces_sample_rate=0.1 if settings.is_production else 1.0,
+        environment=settings.ENVIRONMENT,
+        # Enable PII data only in development
+        send_default_pii=not settings.is_production
+    )
 
 # --- Logging Setup ---
 logging.basicConfig(
@@ -49,8 +49,16 @@ logging.basicConfig(
 logger = logging.getLogger(__name__)
 
 # --- FastAPI App Instance ---
-app = FastAPI(
+# Create API metadata with environment-dependent settings
+api_metadata = {
     **API_METADATA,
+    "docs_url": settings.docs_url,
+    "redoc_url": settings.redoc_url,
+    "openapi_url": settings.openapi_url,
+}
+
+app = FastAPI(
+    **api_metadata,
     openapi_tags=API_TAGS
 )
 
@@ -63,7 +71,7 @@ app.add_middleware(
 # --- CORS Middleware ---
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=settings.CORS_ORIGINS,
+    allow_origins=settings.cors_origins_list,
     allow_credentials=True,
     allow_methods=["*"],
     allow_headers=["*"],
@@ -175,6 +183,17 @@ app.include_router(oauth_router, prefix="/auth", tags=["auth"])
 app.include_router(api_router, prefix=settings.API_PREFIX)
 # --- End Include API Routers ---
 
+# Health check endpoint
+@app.get("/health", tags=["Health"])
+async def health_check():
+    """
+    Health check endpoint for load balancers and monitoring.
+    """
+    return {
+        "status": settings.HEALTH_STATUS_OK,
+        "environment": settings.ENVIRONMENT,
+        "version": settings.API_VERSION
+    }
 
 # --- Root Endpoint (Optional - outside the main API structure) ---
 @app.get("/", tags=["Root"])
@@ -184,7 +203,11 @@ async def read_root():
     Useful for basic reachability checks.
     """
     logger.info("Root endpoint '/' accessed.")
-    return {"message": "Welcome to the API"}
+    return {
+        "message": settings.ROOT_MESSAGE,
+        "environment": settings.ENVIRONMENT,
+        "version": settings.API_VERSION
+    }
 
 # --- End Root Endpoint ---
 
@@ -192,7 +215,7 @@ async def read_root():
 @app.on_event("startup")
 async def startup_event():
     """Initialize services on startup."""
-    logger.info("Application startup: Connecting to database...")
+    logger.info(f"Application startup in {settings.ENVIRONMENT} environment...")
     # You might perform initial checks or warm-up here
     # await database.engine.connect() # Example check (get_db handles sessions per request)
     init_scheduler()
diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml
new file mode 100644
index 0000000..e9935ec
--- /dev/null
+++ b/docker-compose.prod.yml
@@ -0,0 +1,110 @@
+services:
+  db:
+    image: postgres:17-alpine
+    container_name: postgres_db_prod
+    environment:
+      POSTGRES_USER: ${POSTGRES_USER}
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+      POSTGRES_DB: ${POSTGRES_DB}
+    volumes:
+      - postgres_data:/var/lib/postgresql/data
+      - ./be/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql:ro
+    networks:
+      - app-network
+    healthcheck:
+      test: [ "CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}" ]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 30s
+    restart: unless-stopped
+    deploy:
+      resources:
+        limits:
+          memory: 512M
+        reservations:
+          memory: 256M
+
+  backend:
+    container_name: fastapi_backend_prod
+    build:
+      context: ./be
+      dockerfile: Dockerfile.prod
+      target: production
+    environment:
+      - DATABASE_URL=${DATABASE_URL}
+      - GEMINI_API_KEY=${GEMINI_API_KEY}
+      - SECRET_KEY=${SECRET_KEY}
+      - SESSION_SECRET_KEY=${SESSION_SECRET_KEY}
+      - SENTRY_DSN=${SENTRY_DSN}
+      - LOG_LEVEL=INFO
+      - ENVIRONMENT=production
+      - CORS_ORIGINS=${CORS_ORIGINS}
+      - FRONTEND_URL=${FRONTEND_URL}
+    networks:
+      - app-network
+    depends_on:
+      db:
+        condition: service_healthy
+    restart: unless-stopped
+    deploy:
+      resources:
+        limits:
+          memory: 1G
+        reservations:
+          memory: 512M
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://localhost:8000/health" ]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 30s
+
+  frontend:
+    container_name: frontend_prod
+    build:
+      context: ./fe
+      dockerfile: Dockerfile.prod
+      target: production
+      args:
+        - VITE_API_URL=${VITE_API_URL}
+        - VITE_SENTRY_DSN=${VITE_SENTRY_DSN}
+    ports:
+      - "80:3000"
+    networks:
+      - app-network
+    depends_on:
+      - backend
+    restart: unless-stopped
+    deploy:
+      resources:
+        limits:
+          memory: 256M
+        reservations:
+          memory: 128M
+
+  redis:
+    image: redis:7-alpine
+    container_name: redis_prod
+    command: redis-server --appendonly yes --requirepass ${REDIS_PASSWORD}
+    volumes:
+      - redis_data:/data
+    networks:
+      - app-network
+    restart: unless-stopped
+    deploy:
+      resources:
+        limits:
+          memory: 256M
+        reservations:
+          memory: 128M
+
+volumes:
+  postgres_data:
+    driver: local
+  redis_data:
+    driver: local
+
+networks:
+  app-network:
+    driver: bridge
diff --git a/env.production.template b/env.production.template
new file mode 100644
index 0000000..a3e8582
--- /dev/null
+++ b/env.production.template
@@ -0,0 +1,46 @@
+# Production Environment Variables Template
+# Copy this file to .env.production and fill in the actual values
+# NEVER commit the actual .env.production file to version control
+
+# Database Configuration
+POSTGRES_USER=mitlist_user
+POSTGRES_PASSWORD=your_secure_database_password_here
+POSTGRES_DB=mitlist_prod
+DATABASE_URL=postgresql+asyncpg://mitlist_user:your_secure_database_password_here@db:5432/mitlist_prod
+
+# Security Keys (Generate with: openssl rand -hex 32)
+SECRET_KEY=your_secret_key_here_minimum_32_characters_long
+SESSION_SECRET_KEY=your_session_secret_key_here_minimum_32_characters_long
+
+# API Keys
+GEMINI_API_KEY=your_gemini_api_key_here
+
+# Redis Configuration
+REDIS_PASSWORD=your_redis_password_here
+
+# Sentry Configuration (Optional but recommended)
+SENTRY_DSN=your_sentry_dsn_here
+
+# CORS Configuration
+CORS_ORIGINS=https://yourdomain.com,https://www.yourdomain.com
+FRONTEND_URL=https://yourdomain.com
+
+# Frontend Build Variables
+VITE_API_URL=https://yourdomain.com/api
+VITE_SENTRY_DSN=your_frontend_sentry_dsn_here
+VITE_ROUTER_MODE=history
+
+# OAuth Configuration (if using)
+GOOGLE_CLIENT_ID=your_google_client_id
+GOOGLE_CLIENT_SECRET=your_google_client_secret
+GOOGLE_REDIRECT_URI=https://yourdomain.com/auth/google/callback
+
+APPLE_CLIENT_ID=your_apple_client_id
+APPLE_TEAM_ID=your_apple_team_id
+APPLE_KEY_ID=your_apple_key_id
+APPLE_PRIVATE_KEY=your_apple_private_key
+APPLE_REDIRECT_URI=https://yourdomain.com/auth/apple/callback
+
+# Production Settings
+ENVIRONMENT=production
+LOG_LEVEL=INFO
\ No newline at end of file
diff --git a/fe/Dockerfile.prod b/fe/Dockerfile.prod
new file mode 100644
index 0000000..29d5f83
--- /dev/null
+++ b/fe/Dockerfile.prod
@@ -0,0 +1,68 @@
+# Multi-stage build for production
+FROM node:24-alpine AS base
+
+# Install dependencies only when needed
+FROM base AS deps
+WORKDIR /app
+COPY package*.json ./
+RUN npm ci --only=production && npm cache clean --force
+
+# Development stage
+FROM base AS development
+WORKDIR /app
+COPY package*.json ./
+RUN npm ci
+COPY . .
+
+CMD ["npm", "run", "dev"]
+
+# Build stage
+FROM base AS build
+WORKDIR /app
+
+# Copy package files
+COPY package*.json ./
+
+# Install all dependencies (including devDependencies)
+RUN npm ci
+
+# Copy source code
+COPY . .
+
+# Build arguments for environment variables
+ARG VITE_API_URL
+ARG VITE_SENTRY_DSN
+ARG VITE_ROUTER_MODE=history
+
+# Set environment variables for build
+ENV VITE_API_URL=$VITE_API_URL
+ENV VITE_SENTRY_DSN=$VITE_SENTRY_DSN
+ENV VITE_ROUTER_MODE=$VITE_ROUTER_MODE
+ENV NODE_ENV=production
+
+# Build the application
+RUN npm run build
+
+# Production stage (Node image so the built assets can be served with `serve`)
+FROM node:24-alpine AS production
+
+# Install serve globally
+RUN npm install -g serve
+
+# Set working directory
+WORKDIR /app
+
+# Copy built assets from build stage
+COPY --from=build /app/dist .
+
+# SPA routing: `serve -s` rewrites all not-found requests to index.html,
+# so no extra rewrite configuration file is needed here
+
+# Expose port 3000 (serve default)
+EXPOSE 3000
+
+# Health check (optional; note that curl is not installed in node:alpine by default)
+# HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
+#   CMD curl -f http://localhost:3000/ || exit 1
+
+# Start serve
+CMD ["serve", "-s", ".", "-l", "3000"]
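+
+# Local smoke test of this production image (a sketch; the API URL and image tag
+# below are placeholders -- substitute your own values):
+#   docker build -f fe/Dockerfile.prod --target production \
+#     --build-arg VITE_API_URL=https://yourdomain.com/api \
+#     -t mitlist-frontend:local ./fe
+#   docker run --rm -p 3000:3000 mitlist-frontend:local
+#   curl -I http://localhost:3000/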