#!/bin/bash
# scripts/ci/deploy-production.sh - Deploy to the production environment.
#
# Required environment variables (validated below):
#   PRODUCTION_HOST, PRODUCTION_SSH_KEY, PRODUCTION_SECRET_KEY,
#   PRODUCTION_POSTGRES_PASSWORD, PRODUCTION_EMAIL_HOST, PRODUCTION_EMAIL_PASSWORD
# Optional: DOCKER_REGISTRY, DRONE_TAG / DRONE_COMMIT_SHA, PRODUCTION_USER,
#   PRODUCTION_PORT, FORCE_PRODUCTION_DEPLOY, SLACK_WEBHOOK_URL.
#
# pipefail added so a failing stage inside a pipeline aborts the deploy too.
set -eo pipefail

echo "🚀 Deploying to production environment..."

# Deployment parameters (CI-provided, with sensible defaults).
REGISTRY=${DOCKER_REGISTRY:-"registry.hub.docker.com"}
PROJECT_NAME="catlink"
# Prefer the git tag; fall back to the first 8 chars of the commit SHA.
VERSION=${DRONE_TAG:-${DRONE_COMMIT_SHA:0:8}}
PRODUCTION_HOST=${PRODUCTION_HOST:-"catlink.dev"}
PRODUCTION_USER=${PRODUCTION_USER:-"deploy"}
PRODUCTION_PORT=${PRODUCTION_PORT:-"22"}

echo "📋 Production deployment information:"
echo " • Registry: $REGISTRY"
echo " • Project: $PROJECT_NAME"
echo " • Version: $VERSION"
echo " • Host: $PRODUCTION_HOST"
echo " • User: $PRODUCTION_USER"

# Strict pre-flight checks for production.
echo "🔒 Performing production deployment checks..."

# All of these must be set (and non-empty) for a production deploy.
REQUIRED_VARS=(
    "PRODUCTION_HOST"
    "PRODUCTION_SSH_KEY"
    "PRODUCTION_SECRET_KEY"
    "PRODUCTION_POSTGRES_PASSWORD"
    "PRODUCTION_EMAIL_HOST"
    "PRODUCTION_EMAIL_PASSWORD"
)

for var in "${REQUIRED_VARS[@]}"; do
    # ${!var} is indirect expansion: the value of the variable named by $var.
    if [ -z "${!var}" ]; then
        echo "❌ Required production variable $var is not set!"
        exit 1
    fi
done

# Only proper semver tags (vX.Y.Z) may go to production.
# BUGFIX: previously ANY non-empty FORCE_PRODUCTION_DEPLOY (even "false")
# bypassed this gate; now only the documented value "true" does.
if [[ ! "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && [ "${FORCE_PRODUCTION_DEPLOY:-}" != "true" ]; then
    echo "❌ Production deployment requires a proper version tag (vX.Y.Z)"
    echo "Current version: $VERSION"
    echo "Set FORCE_PRODUCTION_DEPLOY=true to override this check"
    exit 1
fi

# Both images must already exist in the registry before we touch the server.
echo "🔍 Verifying production images exist..."
for image in "backend" "frontend"; do
    if ! docker manifest inspect "$REGISTRY/$PROJECT_NAME-$image:$VERSION" > /dev/null 2>&1; then
        echo "❌ Production image $REGISTRY/$PROJECT_NAME-$image:$VERSION not found!"
        exit 1
    fi
    echo " ✅ $REGISTRY/$PROJECT_NAME-$image:$VERSION verified"
done

# Set up SSH access to the production host.
echo "🔐 Setting up secure SSH connection to production..."
mkdir -p ~/.ssh
chmod 700 ~/.ssh

# Materialize the deploy key from the (base64-encoded) CI secret.
echo "$PRODUCTION_SSH_KEY" | base64 -d > ~/.ssh/id_production
chmod 600 ~/.ssh/id_production

# Pre-seed known_hosts so StrictHostKeyChecking can stay enabled.
# NOTE(review): '|| true' means a silent keyscan failure surfaces later as an
# opaque ssh host-key error — acceptable best-effort, kept as-is.
ssh-keyscan -p "$PRODUCTION_PORT" "$PRODUCTION_HOST" >> ~/.ssh/known_hosts 2>/dev/null || true

# SSH client config: every later "ssh production" / "scp ... production:" uses this alias.
cat > ~/.ssh/config << EOF
Host production
    HostName $PRODUCTION_HOST
    User $PRODUCTION_USER
    Port $PRODUCTION_PORT
    IdentityFile ~/.ssh/id_production
    StrictHostKeyChecking yes
    UserKnownHostsFile ~/.ssh/known_hosts
    ServerAliveInterval 60
    ServerAliveCountMax 3
EOF

# Verify connectivity before generating any deployment files.
echo "🔗 Testing production server connection..."
if ! ssh production "echo 'Production connection successful'" > /dev/null 2>&1; then
    echo "❌ Failed to connect to production server"
    exit 1
fi

echo "✅ Successfully connected to production server"

# Stage all deployment files locally, then scp them over in one pass.
echo "📦 Preparing production deployment files..."
mkdir -p /tmp/production-deploy

# Generate the production docker-compose file.
# Unquoted heredoc: $REGISTRY/$VERSION/$PRODUCTION_HOST expand NOW (CI side);
# \${VAR} survives literally so docker-compose resolves it from .env.production.
cat > /tmp/production-deploy/docker-compose.production.yml << EOF
version: '3.8'

services:
  postgres:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: catlink_production
      POSTGRES_USER: catlink_user
      POSTGRES_PASSWORD: \${POSTGRES_PASSWORD}
      POSTGRES_INITDB_ARGS: "--auth-host=md5"
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./backups:/backups
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U catlink_user -d catlink_production"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 60s
    restart: always
    deploy:
      resources:
        limits:
          memory: 2G
          cpus: '1.0'
        reservations:
          memory: 1G
          cpus: '0.5'
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  redis:
    image: redis:7-alpine
    command: >
      redis-server
      --appendonly yes
      --maxmemory 512mb
      --maxmemory-policy allkeys-lru
      --save 900 1
      --save 300 10
      --save 60 10000
    volumes:
      - redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 10s
      retries: 5
    restart: always
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
        reservations:
          memory: 256M
          cpus: '0.25'
    logging:
      driver: "json-file"
      options:
        max-size: "5m"
        max-file: "3"

  web:
    image: $REGISTRY/$PROJECT_NAME-backend:$VERSION
    environment:
      - DJANGO_ENV=production
      - DEBUG=False
      - SECRET_KEY=\${SECRET_KEY}
      - DATABASE_URL=postgresql://catlink_user:\${POSTGRES_PASSWORD}@postgres:5432/catlink_production
      - REDIS_URL=redis://redis:6379/0
      - ALLOWED_HOSTS=$PRODUCTION_HOST,www.$PRODUCTION_HOST
      - CORS_ALLOWED_ORIGINS=https://$PRODUCTION_HOST,https://www.$PRODUCTION_HOST
      - EMAIL_HOST=\${EMAIL_HOST}
      - EMAIL_PORT=587
      - EMAIL_USE_TLS=True
      - EMAIL_HOST_USER=\${EMAIL_HOST_USER}
      - EMAIL_HOST_PASSWORD=\${EMAIL_HOST_PASSWORD}
      - DEFAULT_FROM_EMAIL=noreply@$PRODUCTION_HOST
      - SENTRY_DSN=\${SENTRY_DSN}
      - CELERY_BROKER_URL=redis://redis:6379/1
    volumes:
      - media_data:/app/media
      - static_data:/app/staticfiles
      - ./logs:/app/logs
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/api/health/"]
      interval: 30s
      timeout: 15s
      retries: 5
      start_period: 60s
    restart: always
    deploy:
      replicas: 2
      resources:
        limits:
          memory: 1G
          cpus: '0.8'
        reservations:
          memory: 512M
          cpus: '0.4'
    logging:
      driver: "json-file"
      options:
        max-size: "20m"
        max-file: "5"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.catlink-api.rule=Host(\`$PRODUCTION_HOST\`,\`www.$PRODUCTION_HOST\`) && PathPrefix(\`/api\`)"
      - "traefik.http.routers.catlink-api.tls=true"
      - "traefik.http.routers.catlink-api.tls.certresolver=letsencrypt"
      - "traefik.http.services.catlink-api.loadbalancer.server.port=8000"
      - "traefik.http.services.catlink-api.loadbalancer.healthcheck.path=/api/health/"

  frontend:
    image: $REGISTRY/$PROJECT_NAME-frontend:$VERSION
    environment:
      - NODE_ENV=production
      - NEXT_PUBLIC_API_URL=https://$PRODUCTION_HOST/api
      - NEXT_PUBLIC_APP_ENV=production
      - NEXT_PUBLIC_SENTRY_DSN=\${FRONTEND_SENTRY_DSN}
      - NEXT_PUBLIC_GOOGLE_ANALYTICS=\${GOOGLE_ANALYTICS_ID}
    depends_on:
      web:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
      interval: 30s
      timeout: 15s
      retries: 5
      start_period: 60s
    restart: always
    deploy:
      replicas: 2
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
        reservations:
          memory: 256M
          cpus: '0.25'
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.catlink.rule=Host(\`$PRODUCTION_HOST\`,\`www.$PRODUCTION_HOST\`)"
      - "traefik.http.routers.catlink.tls=true"
      - "traefik.http.routers.catlink.tls.certresolver=letsencrypt"
      - "traefik.http.services.catlink.loadbalancer.server.port=3000"
      - "traefik.http.services.catlink.loadbalancer.healthcheck.path=/health"
      - "traefik.http.middlewares.catlink-redirect.redirectregex.regex=^https://www.$PRODUCTION_HOST/(.*)"
      - "traefik.http.middlewares.catlink-redirect.redirectregex.replacement=https://$PRODUCTION_HOST/\$\${1}"
      - "traefik.http.routers.catlink.middlewares=catlink-redirect"

  celery:
    image: $REGISTRY/$PROJECT_NAME-backend:$VERSION
    command: celery -A backend worker -l info --concurrency=2
    environment:
      - DJANGO_ENV=production
      - DEBUG=False
      - SECRET_KEY=\${SECRET_KEY}
      - DATABASE_URL=postgresql://catlink_user:\${POSTGRES_PASSWORD}@postgres:5432/catlink_production
      - REDIS_URL=redis://redis:6379/0
      - CELERY_BROKER_URL=redis://redis:6379/1
    volumes:
      - media_data:/app/media
      - ./logs:/app/logs
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    restart: always
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
        reservations:
          memory: 256M
          cpus: '0.25'
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  celery-beat:
    image: $REGISTRY/$PROJECT_NAME-backend:$VERSION
    command: celery -A backend beat -l info --scheduler django_celery_beat.schedulers:DatabaseScheduler
    environment:
      - DJANGO_ENV=production
      - DEBUG=False
      - SECRET_KEY=\${SECRET_KEY}
      - DATABASE_URL=postgresql://catlink_user:\${POSTGRES_PASSWORD}@postgres:5432/catlink_production
      - REDIS_URL=redis://redis:6379/0
      - CELERY_BROKER_URL=redis://redis:6379/1
    volumes:
      - ./logs:/app/logs
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    restart: always
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: '0.25'
        reservations:
          memory: 128M
          cpus: '0.1'
    logging:
      driver: "json-file"
      options:
        max-size: "5m"
        max-file: "3"

volumes:
  postgres_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /opt/catlink/data/postgres
  redis_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /opt/catlink/data/redis
  media_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /opt/catlink/data/media
  static_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /opt/catlink/data/static

networks:
  default:
    external:
      name: traefik_default
EOF

# Generate the production environment file (expanded now, on the CI side).
cat > /tmp/production-deploy/.env.production << EOF
# Production Environment Variables
COMPOSE_PROJECT_NAME=catlink-production

# Database
POSTGRES_PASSWORD=$PRODUCTION_POSTGRES_PASSWORD

# Django
SECRET_KEY=$PRODUCTION_SECRET_KEY

# Email
EMAIL_HOST=$PRODUCTION_EMAIL_HOST
EMAIL_HOST_USER=$PRODUCTION_EMAIL_USER
EMAIL_HOST_PASSWORD=$PRODUCTION_EMAIL_PASSWORD

# Monitoring (если настроено)
SENTRY_DSN=${PRODUCTION_SENTRY_DSN:-}
FRONTEND_SENTRY_DSN=${PRODUCTION_FRONTEND_SENTRY_DSN:-}
GOOGLE_ANALYTICS_ID=${PRODUCTION_GOOGLE_ANALYTICS_ID:-}

# Application info
APP_VERSION=$VERSION
DEPLOY_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
COMMIT_SHA=${DRONE_COMMIT_SHA:-$(git rev-parse HEAD 2>/dev/null || echo "unknown")}

# Backup settings
BACKUP_SCHEDULE=0 2 * * *
BACKUP_RETENTION_DAYS=30
BACKUP_S3_BUCKET=${PRODUCTION_BACKUP_S3_BUCKET:-}
EOF

# Generate the on-server management script.
# Quoted 'EOF': nothing expands here — the script is written verbatim.
cat > /tmp/production-deploy/manage-production.sh << 'EOF'
#!/bin/bash
# Production management script

set -e

COMPOSE_FILE="docker-compose.production.yml"
PROJECT_NAME="catlink-production"
BACKUP_DIR="/opt/catlink/backups"

# Функция логирования
log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a /var/log/catlink-production.log
}

case "$1" in
    "start")
        log "🚀 Starting production environment..."
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME up -d
        log "✅ Production environment started"
        ;;
    "stop")
        log "🛑 Stopping production environment..."
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME down
        log "✅ Production environment stopped"
        ;;
    "restart")
        log "🔄 Restarting production environment..."
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME restart
        log "✅ Production environment restarted"
        ;;
    "logs")
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME logs -f ${2:-}
        ;;
    "status")
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME ps
        ;;
    "update")
        log "📦 Updating production environment..."

        # Создание бэкапа перед обновлением
        $0 backup

        # Обновление образов
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME pull

        # Rolling update
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME up -d --no-deps --scale web=1 web
        sleep 30
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME up -d --no-deps --scale web=2 web

        # Обновление frontend
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME up -d --no-deps --scale frontend=1 frontend
        sleep 30
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME up -d --no-deps --scale frontend=2 frontend

        log "✅ Production environment updated"
        ;;
    "backup")
        log "💾 Creating production backup..."
        mkdir -p $BACKUP_DIR
        BACKUP_FILE="$BACKUP_DIR/backup-production-$(date +%Y%m%d-%H%M%S).sql"
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME exec -T postgres pg_dump -U catlink_user catlink_production > "$BACKUP_FILE"

        # Компрессия бэкапа
        gzip "$BACKUP_FILE"

        # Бэкап медиа файлов
        tar -czf "$BACKUP_DIR/media-backup-$(date +%Y%m%d-%H%M%S).tar.gz" -C /opt/catlink/data media/

        # Очистка старых бэкапов (старше 30 дней)
        find $BACKUP_DIR -name "*.gz" -mtime +30 -delete

        log "✅ Backup created: $BACKUP_FILE.gz"
        ;;
    "restore")
        if [ -z "$2" ]; then
            echo "Usage: $0 restore "
            exit 1
        fi

        log "🔄 Restoring from backup: $2"

        # Остановка приложения
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME stop web frontend celery celery-beat

        # Восстановление БД
        gunzip -c "$2" | docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME exec -T postgres psql -U catlink_user catlink_production

        # Запуск приложения
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME start web frontend celery celery-beat

        log "✅ Restore completed"
        ;;
    "migrate")
        log "🔄 Running database migrations..."
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME exec web python manage.py migrate
        log "✅ Migrations completed"
        ;;
    "collectstatic")
        log "📦 Collecting static files..."
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME exec web python manage.py collectstatic --noinput
        log "✅ Static files collected"
        ;;
    "shell")
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME exec web python manage.py shell
        ;;
    "health")
        echo "🏥 Production health check:"

        # Проверка контейнеров
        echo "📦 Container status:"
        docker-compose -f $COMPOSE_FILE -p $PROJECT_NAME ps

        # Проверка эндпоинтов
        echo ""
        echo "🌐 Endpoint health:"
        api_status=$(curl -s -o /dev/null -w "%{http_code}" "https://catlink.dev/api/health/" || echo "000")
        frontend_status=$(curl -s -o /dev/null -w "%{http_code}" "https://catlink.dev/" || echo "000")

        if [ "$api_status" = "200" ]; then
            echo " ✅ API: OK ($api_status)"
        else
            echo " ❌ API: FAILED ($api_status)"
        fi

        if [ "$frontend_status" = "200" ]; then
            echo " ✅ Frontend: OK ($frontend_status)"
        else
            echo " ❌ Frontend: FAILED ($frontend_status)"
        fi
        ;;
    *)
        echo "Production Management Script"
        echo ""
        echo "Usage: $0 {start|stop|restart|logs|status|update|backup|restore|migrate|collectstatic|shell|health}"
        echo ""
        echo "Commands:"
        echo "  start        - Start production environment"
        echo "  stop         - Stop production environment"
        echo "  restart      - Restart production environment"
        echo "  logs         - Show logs (optionally specify service)"
        echo "  status       - Show containers status"
        echo "  update       - Update images and restart (with backup)"
        echo "  backup       - Create database and media backup"
        echo "  restore      - Restore from backup file"
        echo "  migrate      - Run database migrations"
        echo "  collectstatic- Collect static files"
        echo "  shell        - Open Django shell"
        echo "  health       - Check production health"
        exit 1
        ;;
esac
EOF

chmod +x /tmp/production-deploy/manage-production.sh

# Generate the on-server monitoring script (also verbatim — quoted 'EOF').
cat > /tmp/production-deploy/monitor-production.sh << 'EOF'
#!/bin/bash
# Production monitoring script

PROJECT_NAME="catlink-production"
HEALTH_CHECK_URL="https://catlink.dev/api/health/"
FRONTEND_URL="https://catlink.dev/"
LOG_FILE="/var/log/catlink-monitor.log"

# Функция логирования
log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a $LOG_FILE
}

log "🔍 Starting production monitoring..."

# Проверка контейнеров
log "📦 Checking container status..."
container_status=$(docker-compose -p $PROJECT_NAME ps --format json | jq -r '.[] | "\(.Name): \(.State)"')
log "Container status: $container_status"

# Проверка здоровья сервисов
log "🏥 Checking service health..."

# Backend API
api_response=$(curl -s -w "HTTPSTATUS:%{http_code};TIME:%{time_total}" "$HEALTH_CHECK_URL" || echo "HTTPSTATUS:000;TIME:0")
api_status=$(echo $api_response | grep -o "HTTPSTATUS:[0-9]*" | cut -d: -f2)
api_time=$(echo $api_response | grep -o "TIME:[0-9.]*" | cut -d: -f2)

# Frontend
frontend_response=$(curl -s -w "HTTPSTATUS:%{http_code};TIME:%{time_total}" "$FRONTEND_URL" || echo "HTTPSTATUS:000;TIME:0")
frontend_status=$(echo $frontend_response | grep -o "HTTPSTATUS:[0-9]*" | cut -d: -f2)
frontend_time=$(echo $frontend_response | grep -o "TIME:[0-9.]*" | cut -d: -f2)

# Проверка производительности
log "📊 Performance metrics:"
log "API: $api_status (${api_time}s)"
log "Frontend: $frontend_status (${frontend_time}s)"

# Проверка дискового пространства
disk_usage=$(df -h /opt/catlink | tail -1 | awk '{print $5}' | sed 's/%//')
log "Disk usage: ${disk_usage}%"

# Проверка памяти
memory_usage=$(free | grep Mem | awk '{printf "%.0f", $3/$2 * 100}')
log "Memory usage: ${memory_usage}%"

# Алерты
if [ "$api_status" != "200" ] || [ "$frontend_status" != "200" ]; then
    log "🚨 ALERT: Service health check failed!"
fi

if [ "$disk_usage" -gt 85 ]; then
    log "🚨 ALERT: Disk usage above 85%!"
fi

if [ "$memory_usage" -gt 90 ]; then
    log "🚨 ALERT: Memory usage above 90%!"
fi

log "✅ Monitoring completed"
EOF

chmod +x /tmp/production-deploy/monitor-production.sh

# Ship everything to the production server.
echo "📤 Copying deployment files to production server..."
scp -r /tmp/production-deploy/* production:/opt/catlink/

# Run the deployment on the production host.
# Unquoted heredoc: \$ / \${...} are escaped so they expand REMOTELY,
# not on the CI runner.
echo "🚀 Executing production deployment..."
ssh production << EOF
set -e
cd /opt/catlink

# Проверка готовности к деплою
echo "🔍 Pre-deployment checks..."

# Проверка свободного места
# BUGFIX: was 'df -h', whose human-readable output (980M, 1.5T, ...) broke the
# numeric comparison below; -BG always reports whole gigabytes with a G suffix.
available_space=\$(df -BG . | tail -1 | awk '{print \$4}' | sed 's/G//')
if [ "\${available_space%.*}" -lt 5 ]; then
    echo "❌ Insufficient disk space for deployment (less than 5GB available)"
    exit 1
fi

# Создание полного бэкапа перед деплоем
echo "💾 Creating pre-deployment backup..."
if [ -f docker-compose.production.yml ]; then
    ./manage-production.sh backup
fi

# Создание директорий для данных
mkdir -p /opt/catlink/data/{postgres,redis,media,static}
mkdir -p /opt/catlink/backups
mkdir -p /opt/catlink/logs

# Установка правильных прав
chown -R 1000:1000 /opt/catlink/data/
chmod -R 755 /opt/catlink/data/

# Загрузка переменных окружения
source .env.production

# Остановка старой версии (если существует)
if [ -f docker-compose.production.yml ]; then
    echo "🛑 Stopping current production deployment..."
    ./manage-production.sh stop
fi

# Очистка старых образов
echo "🧹 Cleaning up old images..."
docker image prune -f

# Загрузка новых образов
echo "📥 Pulling new production images..."
docker-compose -f docker-compose.production.yml pull

# Запуск новой версии
echo "🚀 Starting new production deployment..."
./manage-production.sh start

# Ожидание готовности сервисов
echo "⏳ Waiting for services to be ready..."
sleep 60

# Выполнение миграций
echo "🔄 Running database migrations..."
./manage-production.sh migrate

# Сбор статических файлов
echo "📦 Collecting static files..."
./manage-production.sh collectstatic

# Проверка здоровья production
echo "🏥 Performing production health check..."
./manage-production.sh health

echo "✅ Production deployment completed successfully!"
EOF

# Verify the deployment from the outside (CI side).
echo "🔍 Final production verification..."
sleep 30

# Extended production checks against the public endpoints.
api_status=$(curl -s -o /dev/null -w "%{http_code}" "https://$PRODUCTION_HOST/api/health/" || echo "000")
frontend_status=$(curl -s -o /dev/null -w "%{http_code}" "https://$PRODUCTION_HOST/" || echo "000")
admin_status=$(curl -s -o /dev/null -w "%{http_code}" "https://$PRODUCTION_HOST/admin/" || echo "000")

echo "📊 Production verification results:"
echo " • API Health: $api_status"
echo " • Frontend: $frontend_status"
echo " • Admin Panel: $admin_status"

if [ "$api_status" = "200" ] && [ "$frontend_status" = "200" ] && [ "$admin_status" = "200" ]; then
    echo "✅ Production deployment verified successfully!"

    # Success notifications.
    echo "📢 Sending production deployment notifications..."

    # Slack (best effort — notification failure must not fail the deploy).
    if [ -n "$SLACK_WEBHOOK_URL" ]; then
        curl -X POST -H 'Content-type: application/json' \
            --data "{
                \"text\": \"🎉 *CatLink $VERSION* successfully deployed to production!\",
                \"attachments\": [
                    {
                        \"color\": \"good\",
                        \"fields\": [
                            { \"title\": \"Environment\", \"value\": \"🚀 Production\", \"short\": true },
                            { \"title\": \"URL\", \"value\": \"https://$PRODUCTION_HOST\", \"short\": true },
                            { \"title\": \"Version\", \"value\": \"$VERSION\", \"short\": true },
                            { \"title\": \"Status\", \"value\": \"✅ Live & Healthy\", \"short\": true }
                        ]
                    }
                ]
            }" \
            "$SLACK_WEBHOOK_URL" || echo "Failed to send Slack notification"
    fi
else
    echo "❌ Production deployment verification failed!"

    # Fetch logs for diagnosis.
    # BUGFIX: previously called './manage-production.sh logs --tail=100', but
    # that subcommand runs 'docker-compose logs -f ${2:-}' — follow mode with
    # '--tail=100' swallowed as a service name — which blocks the CI job
    # forever. Call docker-compose directly, without -f, and never let a log
    # fetch failure mask the exit 1 below.
    echo "📋 Getting production logs for diagnosis..."
    ssh production "cd /opt/catlink && docker-compose -f docker-compose.production.yml -p catlink-production logs --tail=100" || true

    # Critical alert (best effort).
    if [ -n "$SLACK_WEBHOOK_URL" ]; then
        curl -X POST -H 'Content-type: application/json' \
            --data "{
                \"text\": \"🚨 *CRITICAL: CatLink $VERSION production deployment failed!*\",
                \"attachments\": [
                    {
                        \"color\": \"danger\",
                        \"fields\": [
                            { \"title\": \"API Status\", \"value\": \"$api_status\", \"short\": true },
                            { \"title\": \"Frontend Status\", \"value\": \"$frontend_status\", \"short\": true }
                        ]
                    }
                ]
            }" \
            "$SLACK_WEBHOOK_URL" || true
    fi

    exit 1
fi

# Write a detailed deployment report for the CI artifacts.
cat > /tmp/production-deploy-report.md << EOF
# 🚀 Production Deployment Report

## 📋 Deployment Summary
- **Version**: $VERSION
- **Environment**: 🚀 Production
- **URL**: https://$PRODUCTION_HOST
- **Deployed At**: $(date -u +"%Y-%m-%d %H:%M:%S UTC")
- **Deployed By**: ${DRONE_COMMIT_AUTHOR:-"CI/CD Pipeline"}
- **Build**: #${DRONE_BUILD_NUMBER:-"manual"}

## ✅ Verification Results
- **API Health**: $api_status ✅
- **Frontend**: $frontend_status ✅
- **Admin Panel**: $admin_status ✅
- **Database**: Migrations applied successfully ✅
- **Static Files**: Collected successfully ✅
- **Health Checks**: All services healthy ✅

## 🔗 Production Links
- [🌐 Application](https://$PRODUCTION_HOST)
- [📚 API Documentation](https://$PRODUCTION_HOST/api/docs/)
- [🔧 Admin Panel](https://$PRODUCTION_HOST/admin/)

## 📊 Deployment Metrics
- **Deployment Time**: $(date -u +"%Y-%m-%d %H:%M:%S UTC")
- **Downtime**: Minimal (rolling deployment)
- **Images Size**: Production optimized
- **Health Check**: All endpoints responding

## 🔐 Security & Compliance
- ✅ HTTPS enabled with Let's Encrypt
- ✅ Security headers configured
- ✅ Non-root container execution
- ✅ Resource limits applied
- ✅ Logging configured
- ✅ Backup system active

## 🛠️ Management Commands
\`\`\`bash
# SSH to production server
ssh production

# Check production health
./manage-production.sh health

# View logs
./manage-production.sh logs

# Create backup
./manage-production.sh backup

# Monitor production
./monitor-production.sh
\`\`\`

## 📈 Next Steps
- [ ] Monitor application metrics
- [ ] Verify all features working correctly
- [ ] Update monitoring dashboards
- [ ] Schedule next backup
- [ ] Update documentation

---
**🎉 Production deployment completed successfully!**

*This is an automated deployment report generated by the CI/CD pipeline.*
EOF

echo ""
echo "🎉 PRODUCTION DEPLOYMENT COMPLETED SUCCESSFULLY! 🎉"
echo ""
echo "🔗 Production URLs:"
echo " 🌐 Application: https://$PRODUCTION_HOST"
echo " 📚 API Docs: https://$PRODUCTION_HOST/api/docs/"
echo " 🔧 Admin Panel: https://$PRODUCTION_HOST/admin/"
echo ""
echo "📄 Deployment report: /tmp/production-deploy-report.md"
echo "📊 Version $VERSION is now LIVE! 🚀"