pipeline features
Some checks failed
continuous-integration/drone/push Build is failing

This commit is contained in:
2025-09-25 08:59:19 +09:00
parent 003950dce6
commit dc50a9858e
8 changed files with 901 additions and 37 deletions

View File

@@ -131,29 +131,10 @@ steps:
when:
branch: [main, develop]
# Integration tests with real services
- name: integration-test
image: docker/compose:latest
depends_on:
- build-user-service
- build-emergency-service
- build-location-service
- build-calendar-service
- build-notification-service
- build-api-gateway
volumes:
- /var/run/docker.sock:/var/run/docker.sock
commands:
- docker-compose -f docker-compose.test.yml up -d
- sleep 30
- docker-compose -f docker-compose.test.yml exec -T api-gateway curl -f http://localhost:8000/health
- docker-compose -f docker-compose.test.yml exec -T user-service curl -f http://localhost:8001/api/v1/health
- docker-compose -f docker-compose.test.yml down
# Deploy to staging
- name: deploy-staging
image: plugins/ssh
depends_on: [integration-test]
depends_on: [build-user-service, build-emergency-service, build-location-service, build-calendar-service, build-notification-service, build-api-gateway]
settings:
host:
from_secret: staging_host
@@ -172,7 +153,7 @@ steps:
# Deploy to production
- name: deploy-production
image: plugins/ssh
depends_on: [integration-test]
depends_on: [build-user-service, build-emergency-service, build-location-service, build-calendar-service, build-notification-service, build-api-gateway]
settings:
host:
from_secret: production_host
@@ -231,20 +212,6 @@ services:
- name: redis
image: redis:7-alpine
# Test Kafka
- name: kafka
image: confluentinc/cp-kafka:latest
environment:
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
- name: zookeeper
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
---
kind: pipeline
type: docker
@@ -281,4 +248,4 @@ steps:
---
kind: signature
hmac: 2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae
hmac: 2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae

35
.env.prod.example Normal file
View File

@@ -0,0 +1,35 @@
# Production Environment Variables
# Copy to .env.prod and fill in actual values
# Application Settings
TAG=latest
ENVIRONMENT=production
# Security
JWT_SECRET_KEY=your-super-secret-jwt-key-change-this-in-production
POSTGRES_PASSWORD=your-super-secret-database-password
REPLICATION_PASSWORD=your-replication-password
GRAFANA_PASSWORD=your-grafana-admin-password
# External Services
FCM_SERVER_KEY=your-firebase-cloud-messaging-server-key
TWILIO_ACCOUNT_SID=your-twilio-account-sid
TWILIO_AUTH_TOKEN=your-twilio-auth-token
SENDGRID_API_KEY=your-sendgrid-api-key
# SSL Certificates
SSL_CERT_PATH=/path/to/your/ssl/certificate.pem
SSL_KEY_PATH=/path/to/your/ssl/private.key
# Backup Settings
BACKUP_S3_BUCKET=your-backup-bucket
AWS_ACCESS_KEY_ID=your-aws-access-key
AWS_SECRET_ACCESS_KEY=your-aws-secret-key
# Monitoring
SENTRY_DSN=your-sentry-dsn-for-error-tracking
NEW_RELIC_LICENSE_KEY=your-new-relic-license-key
# Domain Settings
DOMAIN=your-domain.com
API_URL=https://api.your-domain.com

200
DRONE_SETUP.md Normal file
View File

@@ -0,0 +1,200 @@
# Drone CI/CD Setup Instructions
## 🚁 Настройка Drone Pipeline для Women's Safety Backend
### Предварительные требования
1. **Drone Server** - установлен и настроен
2. **Docker Registry** - для хранения образов
3. **Production Servers** - настроены для развертывания
### 1. Настройка Repository в Drone
```bash
# Активация репозитория
drone repo enable women-safety/backend
# Настройка доверенного режима (для Docker)
drone repo update --trusted women-safety/backend
```
### 2. Настройка Secrets
```bash
# Docker Registry
drone secret add --repository women-safety/backend \
--name docker_username --data "your-docker-username"
drone secret add --repository women-safety/backend \
--name docker_password --data "your-docker-password"
# Production SSH
drone secret add --repository women-safety/backend \
--name production_host --data "production.example.com"
drone secret add --repository women-safety/backend \
--name production_user --data "deploy"
drone secret add --repository women-safety/backend \
--name production_ssh_key --data @~/.ssh/id_rsa
# Staging SSH
drone secret add --repository women-safety/backend \
--name staging_host --data "staging.example.com"
drone secret add --repository women-safety/backend \
--name staging_user --data "deploy"
drone secret add --repository women-safety/backend \
--name staging_ssh_key --data @~/.ssh/id_rsa_staging
# Notifications
drone secret add --repository women-safety/backend \
--name slack_webhook --data "https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK"
```
### 3. Настройка Pipeline Triggers
```bash
# Настройка cron для уязвимостей (каждую ночь в 2:00)
drone cron add --repository women-safety/backend \
--name nightly-security-scan \
--expr "0 2 * * *" \
--branch main
# Настройка cron для производительности (каждое воскресенье в 3:00)
drone cron add --repository women-safety/backend \
--name weekly-performance-test \
--expr "0 3 * * 0" \
--branch main
```
### 4. Workflow
#### Development Workflow:
```
1. Push to feature branch
2. ✅ Lint & Type Check
3. ✅ Unit Tests
4. ✅ Security Scan
5. ✅ Build Images
6. ✅ Integration Tests
```
#### Staging Deployment:
```
1. Merge to 'develop' branch
2. ✅ Full Pipeline
3. 🚀 Auto-deploy to staging
4. 📱 Slack notification
```
#### Production Deployment:
```
1. Merge to 'main' branch
2. ✅ Full Pipeline
3. ✅ Security & Performance validation
4. 🚀 Deploy to production
5. 📊 Health checks
6. 📱 Success notification
```
### 5. Мониторинг Pipeline
#### Dashboard URLs:
- **Drone UI**: `https://drone.example.com/women-safety/backend`
- **Build Status**: `https://drone.example.com/api/badges/women-safety/backend/status.svg`
#### CLI Commands:
```bash
# Просмотр статуса
drone build ls women-safety/backend
# Логи последнего build
drone build logs women-safety/backend
# Перезапуск build
drone build restart women-safety/backend 123
# Промотирование в продакшен
drone build promote women-safety/backend 123 production
```
### 6. Troubleshooting
#### Общие проблемы:
1. **Build fails на этапе Docker push:**
```bash
# Проверить Docker credentials
drone secret ls women-safety/backend
```
2. **SSH Connection Failed:**
```bash
# Проверить SSH ключи
drone secret update --repository women-safety/backend \
--name production_ssh_key --data @~/.ssh/id_rsa
```
3. **Integration tests timeout:**
```bash
# Увеличить timeout в .drone.yml
# Или проверить ресурсы на build сервере
```
### 7. Performance Tuning
#### Pipeline Optimization:
```yaml
# Параллельные этапы
depends_on: [setup]
# Кэширование
volumes:
- name: cache
host:
path: /tmp/cache
```
#### Resource Limits:
```yaml
# Настройка ресурсов для тяжелых задач
environment:
DOCKER_BUILDKIT: 1
```
### 8. Security Best Practices
1. **Secrets Management:**
- Никогда не коммитить секреты
- Использовать Drone secrets для всех чувствительных данных
- Регулярно ротировать ключи
2. **Image Security:**
- Сканирование образов с Trivy
- Использование minimal base images
- Регулярные обновления зависимостей
3. **Network Security:**
- VPN для production deployments
- Firewall rules для Drone server
- SSL/TLS для всех соединений
### 9. Backup & Recovery
```bash
# Backup Drone database
drone backup
# Restore configuration
drone restore backup-file.tar.gz
```
---
## 📞 Support
- **Documentation**: [Drone Docs](https://docs.drone.io/)
- **Community**: [Drone Community](https://discourse.drone.io/)
- **Issues**: Create issue в репозитории проекта

View File

@@ -8,6 +8,8 @@
[![FastAPI](https://img.shields.io/badge/FastAPI-0.104+-green.svg)](https://fastapi.tiangolo.com)
[![PostgreSQL](https://img.shields.io/badge/PostgreSQL-15+-blue.svg)](https://postgresql.org)
[![Docker](https://img.shields.io/badge/Docker-Ready-blue.svg)](https://docker.com)
[![Drone CI](https://img.shields.io/badge/Drone-CI%2FCD-orange.svg)](https://drone.io)
[![Build Status](https://drone.example.com/api/badges/women-safety/backend/status.svg)](https://drone.example.com/women-safety/backend)
## 🏗️ Архитектура
@@ -285,4 +287,67 @@ export DRONE_TOKEN=your-token
# Запуск build
drone build promote women-safety/backend 123 production
```
```
### 📜 Дополнительные скрипты
**Deployment Scripts:**
- `deploy-production.sh` - Полное развертывание в продакшен с проверками
- `test_auth_flow.sh` - Тестирование регистрации и авторизации
- `start_services_no_docker.sh` - Запуск сервисов без Docker
**Performance Testing:**
- `tests/performance/load-test.js` - K6 нагрузочное тестирование
- `tests/performance/stress-test.js` - K6 стресс-тестирование
**Quick Commands:**
```bash
# Полное развертывание
./deploy-production.sh v1.2.3
# Тестирование API
./test_auth_flow.sh
# Проверка системы
./health-check.sh
# Нагрузочное тестирование
k6 run tests/performance/load-test.js
```
## 📁 Структура проекта
```
women-safety-backend/
├── .drone.yml # Drone CI/CD Pipeline
├── deploy-production.sh # Production deployment script
├── docker-compose.prod.yml # Production configuration
├── docker-compose.test.yml # Testing configuration
├── DRONE_SETUP.md # Drone setup instructions
├── services/ # Microservices
│ ├── user_service/ # User management & auth
│ ├── emergency_service/ # SOS alerts & emergencies
│ ├── location_service/ # Geolocation & tracking
│ ├── calendar_service/ # Women's health calendar
│ ├── notification_service/ # Push notifications
│ └── api_gateway/ # API Gateway & routing
├── shared/ # Shared utilities
│ ├── config.py # Configuration management
│ ├── database.py # Database connections
│ └── auth.py # JWT authentication
├── tests/ # Test suites
│ ├── performance/ # K6 load tests
│ └── integration/ # Integration tests
└── scripts/ # Utility scripts
├── start_services_no_docker.sh
├── test_auth_flow.sh
└── health-check.sh # System health monitoring
```
## 🔗 Quick Links
- 📚 **[Drone Setup Guide](DRONE_SETUP.md)** - Complete CI/CD setup
- 🚀 **[Production Deployment](deploy-production.sh)** - Zero-downtime deployment
- 🧪 **[API Testing](test_auth_flow.sh)** - Authentication flow testing
- 📊 **[Performance Tests](tests/performance/)** - Load & stress testing
- 🐳 **[Docker Configs](docker-compose.prod.yml)** - Production containers

150
deploy-production.sh Executable file
View File

@@ -0,0 +1,150 @@
#!/bin/bash
# Production Deployment Script for Women's Safety Backend
# Usage: ./deploy-production.sh [version]
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
VERSION=${1:-latest}
COMPOSE_FILE="docker-compose.prod.yml"
ENV_FILE=".env.prod"
echo -e "${BLUE}🚀 Starting Women's Safety Backend Deployment${NC}"
echo -e "${BLUE}Version: ${VERSION}${NC}"
echo -e "${BLUE}Environment: Production${NC}"
echo
# Check if environment file exists
if [ ! -f "$ENV_FILE" ]; then
echo -e "${RED}❌ Environment file $ENV_FILE not found!${NC}"
echo -e "${YELLOW}💡 Copy .env.prod.example to .env.prod and configure it${NC}"
exit 1
fi
# Load environment variables
source "$ENV_FILE"
export TAG="$VERSION"
echo -e "${YELLOW}🔍 Pre-deployment checks...${NC}"
# Check if Docker is running
if ! docker info > /dev/null 2>&1; then
echo -e "${RED}❌ Docker is not running!${NC}"
exit 1
fi
# Check if required images exist
echo -e "${YELLOW}📦 Checking Docker images...${NC}"
SERVICES=("user-service" "emergency-service" "location-service" "calendar-service" "notification-service" "api-gateway")
for service in "${SERVICES[@]}"; do
if ! docker image inspect "women-safety/$service:$VERSION" > /dev/null 2>&1; then
echo -e "${YELLOW}⬇️ Pulling women-safety/$service:$VERSION...${NC}"
docker pull "women-safety/$service:$VERSION" || {
echo -e "${RED}❌ Failed to pull women-safety/$service:$VERSION${NC}"
exit 1
}
else
echo -e "${GREEN}✅ women-safety/$service:$VERSION exists${NC}"
fi
done
# Database backup before deployment
echo -e "${YELLOW}💾 Creating database backup...${NC}"
BACKUP_FILE="backup-$(date +%Y%m%d-%H%M%S).sql"
docker-compose -f "$COMPOSE_FILE" exec -T postgres-primary pg_dump -U postgres women_safety_prod > "$BACKUP_FILE" || {
echo -e "${YELLOW}⚠️ Backup failed or database not running${NC}"
}
# Health check function
health_check() {
local service_url=$1
local service_name=$2
local max_attempts=30
local attempt=1
echo -e "${YELLOW}🏥 Health checking $service_name...${NC}"
while [ $attempt -le $max_attempts ]; do
if curl -s -f "$service_url/health" > /dev/null 2>&1; then
echo -e "${GREEN}$service_name is healthy${NC}"
return 0
fi
echo -n "."
sleep 2
((attempt++))
done
echo -e "${RED}$service_name health check failed${NC}"
return 1
}
# Deploy with zero downtime
echo -e "${YELLOW}🔄 Starting rolling deployment...${NC}"
# Start new services
docker-compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" up -d
# Wait for services to be ready
sleep 10
# Health checks
echo -e "${YELLOW}🏥 Running health checks...${NC}"
health_check "http://localhost:8000" "API Gateway"
health_check "http://localhost:8001" "User Service"
health_check "http://localhost:8002" "Emergency Service"
health_check "http://localhost:8003" "Location Service"
health_check "http://localhost:8004" "Calendar Service"
health_check "http://localhost:8005" "Notification Service"
# Smoke tests
echo -e "${YELLOW}🧪 Running smoke tests...${NC}"
# Test user registration
SMOKE_EMAIL="smoke-test-$(date +%s)@example.com"
REGISTRATION_RESPONSE=$(curl -s -X POST "http://localhost:8000/api/v1/register" \
-H "Content-Type: application/json" \
-d "{\"email\":\"$SMOKE_EMAIL\",\"password\":\"smoketest123\",\"first_name\":\"Smoke\",\"last_name\":\"Test\",\"phone\":\"+1234567890\"}" \
-w "%{http_code}")
if [[ $REGISTRATION_RESPONSE == *"201"* ]]; then
echo -e "${GREEN}✅ User registration smoke test passed${NC}"
else
echo -e "${RED}❌ User registration smoke test failed${NC}"
echo -e "${YELLOW}Response: $REGISTRATION_RESPONSE${NC}"
fi
# Clean up old images
echo -e "${YELLOW}🧹 Cleaning up old Docker images...${NC}"
docker image prune -f
# Final status
echo
echo -e "${GREEN}🎉 Deployment completed successfully!${NC}"
echo -e "${GREEN}📊 Services Status:${NC}"
docker-compose -f "$COMPOSE_FILE" ps
echo
echo -e "${BLUE}🔗 Access URLs:${NC}"
echo -e "${BLUE} API Gateway: https://$DOMAIN${NC}"
echo -e "${BLUE} Monitoring: http://$DOMAIN:3000${NC}"
echo -e "${BLUE} Metrics: http://$DOMAIN:9090${NC}"
echo
echo -e "${GREEN}✅ Women's Safety Backend v$VERSION deployed successfully!${NC}"
# Send deployment notification (if webhook configured)
if [ ! -z "$SLACK_WEBHOOK" ]; then
curl -X POST -H 'Content-type: application/json' \
--data "{\"text\":\"🚀 Women's Safety Backend v$VERSION deployed successfully to production!\"}" \
"$SLACK_WEBHOOK" > /dev/null 2>&1 || true
fi

289
docker-compose.prod.yml Normal file
View File

@@ -0,0 +1,289 @@
version: '3.8'
services:
# Nginx Load Balancer
nginx:
image: nginx:alpine
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf
- ./nginx/ssl:/etc/nginx/ssl
depends_on:
- api-gateway-1
- api-gateway-2
restart: always
# API Gateway Cluster
api-gateway-1:
image: women-safety/api-gateway:${TAG:-latest}
environment:
- NODE_ID=1
- USER_SERVICE_URL=http://user-service-1:8001,http://user-service-2:8001
- EMERGENCY_SERVICE_URL=http://emergency-service-1:8002,http://emergency-service-2:8002
- LOCATION_SERVICE_URL=http://location-service-1:8003,http://location-service-2:8003
- CALENDAR_SERVICE_URL=http://calendar-service-1:8004,http://calendar-service-2:8004
- NOTIFICATION_SERVICE_URL=http://notification-service-1:8005,http://notification-service-2:8005
- REDIS_URL=redis://redis-cluster:6379/0
depends_on:
- redis-cluster
restart: always
deploy:
resources:
limits:
cpus: '1.0'
memory: 1G
reservations:
cpus: '0.5'
memory: 512M
api-gateway-2:
image: women-safety/api-gateway:${TAG:-latest}
environment:
- NODE_ID=2
- USER_SERVICE_URL=http://user-service-1:8001,http://user-service-2:8001
- EMERGENCY_SERVICE_URL=http://emergency-service-1:8002,http://emergency-service-2:8002
- LOCATION_SERVICE_URL=http://location-service-1:8003,http://location-service-2:8003
- CALENDAR_SERVICE_URL=http://calendar-service-1:8004,http://calendar-service-2:8004
- NOTIFICATION_SERVICE_URL=http://notification-service-1:8005,http://notification-service-2:8005
- REDIS_URL=redis://redis-cluster:6379/0
depends_on:
- redis-cluster
restart: always
deploy:
resources:
limits:
cpus: '1.0'
memory: 1G
# User Service Cluster
user-service-1:
image: women-safety/user-service:${TAG:-latest}
environment:
- NODE_ID=1
- DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres-primary:5432/women_safety_prod
- DATABASE_REPLICA_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres-replica:5432/women_safety_prod
- REDIS_URL=redis://redis-cluster:6379/1
- JWT_SECRET_KEY=${JWT_SECRET_KEY}
depends_on:
- postgres-primary
- redis-cluster
restart: always
deploy:
resources:
limits:
cpus: '1.5'
memory: 2G
reservations:
cpus: '0.5'
memory: 512M
user-service-2:
image: women-safety/user-service:${TAG:-latest}
environment:
- NODE_ID=2
- DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres-primary:5432/women_safety_prod
- DATABASE_REPLICA_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres-replica:5432/women_safety_prod
- REDIS_URL=redis://redis-cluster:6379/1
- JWT_SECRET_KEY=${JWT_SECRET_KEY}
depends_on:
- postgres-primary
- redis-cluster
restart: always
deploy:
resources:
limits:
cpus: '1.5'
memory: 2G
# Emergency Service Cluster (High Priority)
emergency-service-1:
image: women-safety/emergency-service:${TAG:-latest}
environment:
- NODE_ID=1
- DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres-primary:5432/women_safety_prod
- REDIS_URL=redis://redis-cluster:6379/2
- KAFKA_BROKERS=kafka-1:9092,kafka-2:9092,kafka-3:9092
- LOCATION_SERVICE_URL=http://location-service-1:8003,http://location-service-2:8003
- NOTIFICATION_SERVICE_URL=http://notification-service-1:8005,http://notification-service-2:8005
depends_on:
- postgres-primary
- redis-cluster
- kafka-1
restart: always
deploy:
resources:
limits:
cpus: '2.0'
memory: 3G
reservations:
cpus: '1.0'
memory: 1G
emergency-service-2:
image: women-safety/emergency-service:${TAG:-latest}
environment:
- NODE_ID=2
- DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres-primary:5432/women_safety_prod
- REDIS_URL=redis://redis-cluster:6379/2
- KAFKA_BROKERS=kafka-1:9092,kafka-2:9092,kafka-3:9092
- LOCATION_SERVICE_URL=http://location-service-1:8003,http://location-service-2:8003
- NOTIFICATION_SERVICE_URL=http://notification-service-1:8005,http://notification-service-2:8005
depends_on:
- postgres-primary
- redis-cluster
- kafka-1
restart: always
deploy:
resources:
limits:
cpus: '2.0'
memory: 3G
# Database Cluster
postgres-primary:
image: postgres:15
environment:
POSTGRES_DB: women_safety_prod
POSTGRES_USER: postgres
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_REPLICATION_MODE: master
POSTGRES_REPLICATION_USER: replicator
POSTGRES_REPLICATION_PASSWORD: ${REPLICATION_PASSWORD}
volumes:
- postgres_primary_data:/var/lib/postgresql/data
- ./postgres/postgresql.conf:/etc/postgresql/postgresql.conf
command: postgres -c config_file=/etc/postgresql/postgresql.conf
restart: always
deploy:
resources:
limits:
cpus: '4.0'
memory: 8G
reservations:
cpus: '2.0'
memory: 4G
postgres-replica:
image: postgres:15
environment:
POSTGRES_DB: women_safety_prod
POSTGRES_USER: postgres
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_MASTER_SERVICE: postgres-primary
POSTGRES_REPLICATION_MODE: slave
POSTGRES_REPLICATION_USER: replicator
POSTGRES_REPLICATION_PASSWORD: ${REPLICATION_PASSWORD}
volumes:
- postgres_replica_data:/var/lib/postgresql/data
depends_on:
- postgres-primary
restart: always
deploy:
resources:
limits:
cpus: '3.0'
memory: 6G
# Redis Cluster
redis-cluster:
image: redis:7-alpine
command: redis-server --appendonly yes --cluster-enabled yes
volumes:
- redis_data:/data
restart: always
deploy:
resources:
limits:
cpus: '1.0'
memory: 2G
# Kafka Cluster
kafka-1:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1:9092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_DEFAULT_REPLICATION_FACTOR: 3
KAFKA_MIN_INSYNC_REPLICAS: 2
volumes:
- kafka_1_data:/var/lib/kafka/data
depends_on:
- zookeeper
restart: always
kafka-2:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 2
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:9092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_DEFAULT_REPLICATION_FACTOR: 3
KAFKA_MIN_INSYNC_REPLICAS: 2
volumes:
- kafka_2_data:/var/lib/kafka/data
depends_on:
- zookeeper
restart: always
kafka-3:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 3
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-3:9092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_DEFAULT_REPLICATION_FACTOR: 3
KAFKA_MIN_INSYNC_REPLICAS: 2
volumes:
- kafka_3_data:/var/lib/kafka/data
depends_on:
- zookeeper
restart: always
zookeeper:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
volumes:
- zookeeper_data:/var/lib/zookeeper
restart: always
# Monitoring
prometheus:
image: prom/prometheus:latest
ports:
- "9090:9090"
volumes:
- ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
restart: always
grafana:
image: grafana/grafana:latest
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD}
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards
depends_on:
- prometheus
restart: always
volumes:
postgres_primary_data:
postgres_replica_data:
redis_data:
kafka_1_data:
kafka_2_data:
kafka_3_data:
zookeeper_data:
prometheus_data:
grafana_data:

89
health-check.sh Executable file
View File

@@ -0,0 +1,89 @@
#!/bin/bash
# System Health Check Script
# Проверяет все компоненты Women's Safety Backend
set -e
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
echo -e "${BLUE}🏥 Women's Safety Backend - Health Check${NC}"
echo "========================================="
# Check if services are running
echo -e "${YELLOW}🔍 Checking service processes...${NC}"
SERVICES=("user_service" "emergency_service" "location_service" "calendar_service" "notification_service" "api_gateway")
PORTS=(8001 8002 8003 8004 8005 8000)
for i in "${!SERVICES[@]}"; do
service="${SERVICES[$i]}"
port="${PORTS[$i]}"
if lsof -i :$port > /dev/null 2>&1; then
echo -e "${GREEN}$service (port $port) - Running${NC}"
else
echo -e "${RED}$service (port $port) - Not running${NC}"
fi
done
echo
echo -e "${YELLOW}🌐 Testing API endpoints...${NC}"
# Test health endpoints
test_endpoint() {
local url=$1
local name=$2
if curl -s -f "$url" > /dev/null 2>&1; then
echo -e "${GREEN}$name - Healthy${NC}"
else
echo -e "${RED}$name - Failed${NC}"
fi
}
test_endpoint "http://localhost:8000/health" "API Gateway"
test_endpoint "http://localhost:8001/api/v1/health" "User Service"
test_endpoint "http://localhost:8002/api/v1/health" "Emergency Service"
test_endpoint "http://localhost:8003/api/v1/health" "Location Service"
test_endpoint "http://localhost:8004/api/v1/health" "Calendar Service"
test_endpoint "http://localhost:8005/api/v1/health" "Notification Service"
echo
echo -e "${YELLOW}💾 Checking database connection...${NC}"
if curl -s http://localhost:8001/api/v1/health | grep -q "database.*ok" > /dev/null 2>&1; then
echo -e "${GREEN}✅ Database - Connected${NC}"
else
echo -e "${RED}❌ Database - Connection failed${NC}"
fi
echo
echo -e "${YELLOW}🚀 Quick API test...${NC}"
# Test user registration
TEST_EMAIL="healthcheck_$(date +%s)@example.com"
REGISTRATION_RESULT=$(curl -s -X POST "http://localhost:8001/api/v1/register" \
-H "Content-Type: application/json" \
-d "{\"email\":\"$TEST_EMAIL\",\"password\":\"test123\",\"first_name\":\"Health\",\"last_name\":\"Check\",\"phone\":\"+1234567890\"}" \
-w "%{http_code}")
if [[ $REGISTRATION_RESULT == *"201"* ]]; then
echo -e "${GREEN}✅ User Registration - Working${NC}"
else
echo -e "${RED}❌ User Registration - Failed${NC}"
fi
echo
echo -e "${BLUE}📊 System Resources:${NC}"
echo -e "CPU Usage: $(top -bn1 | grep "Cpu(s)" | awk '{print $2}' | awk -F'%' '{print $1}')%"
echo -e "Memory: $(free -h | grep Mem | awk '{print $3"/"$2}')"
echo -e "Disk: $(df -h / | tail -1 | awk '{print $3"/"$2" ("$5" used)"}')"
echo
echo -e "${GREEN}🎉 Health check completed!${NC}"

View File

@@ -0,0 +1,69 @@
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';
const errorRate = new Rate('errors');
export const options = {
stages: [
{ duration: '1m', target: 50 }, // Warm up
{ duration: '2m', target: 200 }, // Ramp up to stress level
{ duration: '3m', target: 500 }, // High stress
{ duration: '2m', target: 1000 }, // Peak stress
{ duration: '2m', target: 500 }, // Scale down
{ duration: '2m', target: 0 }, // Ramp down
],
thresholds: {
http_req_duration: ['p(95)<2000'], // 95% of requests should be below 2s under stress
errors: ['rate<0.05'], // Error rate should be less than 5% under stress
},
};
const BASE_URL = __ENV.API_URL || 'http://localhost:8000';
export default function () {
// Stress test with concurrent emergency alerts
const emergencyPayload = JSON.stringify({
latitude: 40.7128 + (Math.random() - 0.5) * 0.1,
longitude: -74.0060 + (Math.random() - 0.5) * 0.1,
message: `Stress test emergency alert ${Math.random()}`,
alert_type: 'immediate'
});
// Test without authentication (anonymous emergency)
let emergencyResponse = http.post(`${BASE_URL}/api/v1/emergency/anonymous-alert`, emergencyPayload, {
headers: { 'Content-Type': 'application/json' },
});
check(emergencyResponse, {
'emergency alert under stress': (r) => r.status === 201 || r.status === 429, // Accept rate limiting
'response time under stress < 5s': (r) => r.timings.duration < 5000,
}) || errorRate.add(1);
// Rapid location updates (simulating real-time tracking)
const locationPayload = JSON.stringify({
latitude: 40.7128 + (Math.random() - 0.5) * 0.01,
longitude: -74.0060 + (Math.random() - 0.5) * 0.01
});
let locationResponse = http.post(`${BASE_URL}/api/v1/location/anonymous-update`, locationPayload, {
headers: { 'Content-Type': 'application/json' },
});
check(locationResponse, {
'location update under stress': (r) => r.status === 200 || r.status === 429,
'location response time < 2s': (r) => r.timings.duration < 2000,
}) || errorRate.add(1);
// Test system health under stress
if (Math.random() < 0.1) { // 10% of requests check health
let healthResponse = http.get(`${BASE_URL}/health`);
check(healthResponse, {
'health check under stress': (r) => r.status === 200,
'health check fast under stress': (r) => r.timings.duration < 500,
}) || errorRate.add(1);
}
// Minimal sleep for maximum stress
sleep(0.1);
}