Nginx Load Balancing

Setting up load balancing with Nginx for high-traffic applications.

Load Balancing Methods

Round Robin (default)

# /etc/nginx/conf.d/load-balancer.conf

upstream backend {
server 192.168.1.10:8080;
server 192.168.1.11:8080;
server 192.168.1.12:8080;
}

server {
listen 80;
server_name example.com;

location / {
proxy_pass http://backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}

Weighted Round Robin

upstream backend {
server 192.168.1.10:8080 weight=3; # Receives 3/6 of requests
server 192.168.1.11:8080 weight=2; # Receives 2/6 of requests
server 192.168.1.12:8080 weight=1; # Receives 1/6 of requests
}

Least Connections

upstream backend {
least_conn;
server 192.168.1.10:8080;
server 192.168.1.11:8080;
server 192.168.1.12:8080;
}

IP Hash (sticky sessions)

upstream backend {
ip_hash;
server 192.168.1.10:8080;
server 192.168.1.11:8080;
server 192.168.1.12:8080;
}

Advanced Upstream Settings

Configuration with server parameters

upstream backend {
server 192.168.1.10:8080 weight=3 max_fails=3 fail_timeout=30s;
server 192.168.1.11:8080 weight=2 max_fails=3 fail_timeout=30s;
server 192.168.1.12:8080 weight=1 max_fails=3 fail_timeout=30s backup;
server 192.168.1.13:8080 down; # Temporarily disabled
}

server {
listen 80;
server_name api.example.com;

location / {
proxy_pass http://backend;

# Proxy timeouts
proxy_connect_timeout 30s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;

# Buffering
proxy_buffering on;
proxy_buffer_size 4k;
proxy_buffers 8 4k;

# Headers
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;

# Error handling: pass the request to the next server on these errors
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503;
}
}

Health Checks

Open-source Nginx performs only passive health checks: a server is considered unavailable for the duration of fail_timeout after max_fails failed attempts. Active probing of an endpoint such as /health requires NGINX Plus or a third-party module.

upstream backend {
server 192.168.1.10:8080 max_fails=2 fail_timeout=10s;
server 192.168.1.11:8080 max_fails=2 fail_timeout=10s;
server 192.168.1.12:8080 max_fails=2 fail_timeout=10s;
}

server {
listen 80;
server_name example.com;

location /health {
access_log off;
default_type text/plain;
return 200 "healthy\n";
}

location / {
proxy_pass http://backend;

# Passive failover: retry the request on the next upstream server
proxy_next_upstream error timeout http_502 http_503 http_504;
proxy_next_upstream_tries 3;
proxy_next_upstream_timeout 10s;
}
}

SSL Load Balancing

HTTPS load balancing

upstream backend_ssl {
server 192.168.1.10:8443;
server 192.168.1.11:8443;
server 192.168.1.12:8443;
}

server {
listen 443 ssl http2;
server_name secure.example.com;

ssl_certificate /etc/letsencrypt/live/secure.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/secure.example.com/privkey.pem;

ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256;
ssl_prefer_server_ciphers off;

location / {
proxy_pass https://backend_ssl;

# TLS settings for the connection to the upstream
proxy_ssl_verify off; # in production, enable verification together with proxy_ssl_trusted_certificate
proxy_ssl_session_reuse on;

proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
}
}

SSL Termination

upstream backend_http {
server 192.168.1.10:8080;
server 192.168.1.11:8080;
server 192.168.1.12:8080;
}

server {
listen 443 ssl http2;
server_name app.example.com;

ssl_certificate /etc/letsencrypt/live/app.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/app.example.com/privkey.pem;

location / {
proxy_pass http://backend_http; # plain HTTP to the backend servers

proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-Forwarded-Port 443;
}
}

# Redirect HTTP to HTTPS
server {
listen 80;
server_name app.example.com;
return 301 https://$server_name$request_uri;
}

Balancing by Content Type

Separating static files and the API

upstream api_backend {
server 192.168.1.10:8080;
server 192.168.1.11:8080;
}

upstream static_backend {
server 192.168.1.20:80;
server 192.168.1.21:80;
}

server {
listen 80;
server_name myapp.example.com;

# API requests
location /api/ {
proxy_pass http://api_backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}

# Static files
location ~* \.(css|js|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
proxy_pass http://static_backend;
expires 1y;
add_header Cache-Control "public, immutable";
}

# Main application
location / {
proxy_pass http://api_backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}

Routing by Subdomain

upstream app1_backend {
server 192.168.1.10:8080;
server 192.168.1.11:8080;
}

upstream app2_backend {
server 192.168.1.20:8080;
server 192.168.1.21:8080;
}

# Application 1
server {
listen 80;
server_name app1.example.com;

location / {
proxy_pass http://app1_backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}

# Application 2
server {
listen 80;
server_name app2.example.com;

location / {
proxy_pass http://app2_backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}

WebSocket Load Balancing

WebSocket configuration

upstream websocket_backend {
server 192.168.1.10:8080;
server 192.168.1.11:8080;
}

map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}

server {
listen 80;
server_name ws.example.com;

location /ws {
proxy_pass http://websocket_backend;

# WebSocket headers
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

# Increased timeouts for long-lived WebSocket connections
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
}
}

Monitoring and Logging

Custom log format

log_format load_balancer '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'upstream_addr=$upstream_addr '
'upstream_status=$upstream_status '
'upstream_response_time=$upstream_response_time '
'request_time=$request_time';

server {
listen 80;
server_name example.com;

access_log /var/log/nginx/load_balancer.log load_balancer;

location / {
proxy_pass http://backend;
# ... remaining proxy settings
}
}

Status page for monitoring

server {
listen 8080;
server_name localhost;

location /nginx_status {
stub_status on;
access_log off;
allow 127.0.0.1;
allow 192.168.1.0/24;
deny all;
}

location /upstream_status {
# Per-upstream metrics are not built into open-source Nginx: they require either
# NGINX Plus (the "api" directive) or a third-party module such as nginx-module-vts.
# The directives below assume nginx-module-vts is installed and
# vhost_traffic_status_zone is configured in the http block.
vhost_traffic_status_display;
vhost_traffic_status_display_format html;
access_log off;
allow 127.0.0.1;
allow 192.168.1.0/24;
deny all;
}
}

Fault Tolerance

Configuration with backup servers

upstream backend {
server 192.168.1.10:8080 max_fails=3 fail_timeout=30s;
server 192.168.1.11:8080 max_fails=3 fail_timeout=30s;
server 192.168.1.12:8080 backup; # Backup server
server 192.168.1.13:8080 backup; # Another backup server
}

server {
listen 80;
server_name example.com;

location / {
proxy_pass http://backend;

# Retry mechanism
proxy_next_upstream error timeout http_502 http_503 http_504;
proxy_next_upstream_tries 3;
proxy_next_upstream_timeout 30s;

# Serve stale cached responses on errors (takes effect only if proxy_cache is configured)
proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
}
}

Graceful shutdown

#!/bin/bash
# graceful-shutdown.sh

SERVER_IP="192.168.1.10"
NGINX_CONFIG="/etc/nginx/conf.d/load-balancer.conf"

# Mark the server as down
sed -i "s/server $SERVER_IP:8080;/server $SERVER_IP:8080 down;/" "$NGINX_CONFIG"

# Reload the configuration
nginx -s reload

echo "Сервер $SERVER_IP помечен как down"
echo "Ожидание завершения активных соединений..."
sleep 30

# At this point you can stop the application on this server

Performance

Use keepalive connections between Nginx and the upstream servers to reduce connection overhead and latency, as in the sketch below.
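A minimal sketch of such a configuration, reusing the illustrative addresses from the examples above: the keepalive directive keeps a pool of idle connections open to the upstream servers, and proxy_http_version 1.1 with a cleared Connection header is required for keepalive to work through proxy_pass.

upstream backend {
server 192.168.1.10:8080;
server 192.168.1.11:8080;

# Pool of idle connections kept open to the upstream servers (per worker process)
keepalive 32;
}

server {
listen 80;
server_name example.com;

location / {
proxy_pass http://backend;

# Required for upstream keepalive: HTTP/1.1 and an empty Connection header
proxy_http_version 1.1;
proxy_set_header Connection "";
}
}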

Monitoring

Be sure to monitor the state of the upstream servers and set up automatic alerts on failures.
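As one possible sketch, in the spirit of the script above: poll each backend and report failures. The addresses, port and the /health endpoint on the backends are assumptions taken from the earlier examples; the actual alert action is left as a placeholder.

#!/bin/bash
# check-upstreams.sh - minimal health-polling sketch; adapt the addresses and alerting to your setup

BACKENDS="192.168.1.10:8080 192.168.1.11:8080 192.168.1.12:8080"

for backend in $BACKENDS; do
    # Consider a server healthy if /health answers without an HTTP error within 5 seconds
    if curl -fsS --max-time 5 "http://$backend/health" > /dev/null; then
        echo "OK   $backend"
    else
        echo "FAIL $backend"
        # Here, send a real notification: mail, a chat webhook, an alerting system, etc.
    fi
done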