Complete Nginx Configuration Guide: Reverse Proxy, Load Balancing & More

Complete Nginx Configuration Guide for DevOps

Introduction

Nginx is a powerful, high-performance web server, reverse proxy, and load balancer capable of handling hundreds of thousands of concurrent connections with a small, predictable memory footprint. This comprehensive guide covers everything DevOps engineers need to know about Nginx configuration for modern infrastructure.

Nginx Architecture

Master-Worker Process Model:

Nginx Processes:
├── master process (root)
│   ├── Reads configuration
│   ├── Manages worker processes
│   └── Handles signals
└── worker processes (www-data)
    ├── Handles connections
    ├── Processes requests
    └── Executes modules

# View running processes
ps aux | grep nginx
# Output:
# root       1234  0.0  0.1  12345  6789 ?        Ss   10:00   0:00 nginx: master process
# www-data   1235  0.1  0.3  45678 12345 ?        S    10:00   0:05 nginx: worker process
                

Event-Driven Architecture:

Event Processing Model:
- Asynchronous, non-blocking I/O
- One worker handles multiple connections (epoll/kqueue)
- No thread-per-connection model
- Highly efficient for high concurrency

# Connection handling
worker_connections 1024;  # Connections per worker
worker_processes auto;    # Auto CPU detection
use epoll;                # Linux event method

# Typical hardware scaling:
# 2 CPU cores, 4GB RAM → worker_processes 2;
# 8 CPU cores, 16GB RAM → worker_processes 8;
                

Basic Configuration Structure

Main Configuration File (nginx.conf):

# /etc/nginx/nginx.conf
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 768;
    # multi_accept on;
    use epoll;
}

http {
    ##
    # Basic Settings
    ##
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    server_tokens off;

    # MIME types
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # SSL Settings
    ##
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_prefer_server_ciphers on;

    ##
    # Logging Settings
    ##
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##
    gzip on;
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_types text/plain text/css text/xml text/javascript 
               application/json application/javascript application/xml+rss 
               application/atom+xml image/svg+xml;

    ##
    # Virtual Host Configs
    ##
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
                

Virtual Host Configuration:

# /etc/nginx/sites-available/example.com
server {
    listen 80;
    listen [::]:80;
    server_name example.com www.example.com;
    
    root /var/www/example.com/html;
    index index.html index.htm index.nginx-debian.html;

    location / {
        try_files $uri $uri/ =404;
    }

    # Static file cache
    location ~* \.(jpg|jpeg|png|gif|ico|css|js)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }

    # Disable access to hidden files
    location ~ /\. {
        deny all;
    }

    # Custom error pages
    error_page 404 /404.html;
    error_page 500 502 503 504 /50x.html;
}
                

Reverse Proxy Configuration

Basic Reverse Proxy:

server {
    listen 80;
    server_name api.example.com;

    location / {
        proxy_pass http://backend-server:8080;

        # Essential proxy headers so the backend sees the real client
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Connection settings
        proxy_connect_timeout 60s;
        proxy_send_timeout 60s;
        proxy_read_timeout 60s;

        # Buffer settings (buffering on is nginx's default and suits most APIs)
        proxy_buffering on;
        proxy_buffer_size 4k;
        proxy_buffers 8 4k;
        proxy_busy_buffers_size 8k;

        # NOTE: for streaming/SSE endpoints use "proxy_buffering off;" in a
        # separate location block. Declaring proxy_buffering twice in the
        # same block (as the original did with "on" then "off") makes nginx
        # refuse to load the configuration ("directive is duplicate").
    }
}
                

Advanced Reverse Proxy with Microservices:

# API Gateway Pattern
upstream auth_service {
    server auth1:3001;
    server auth2:3001;
}

upstream user_service {
    server users1:3002;
    server users2:3002;
}

upstream order_service {
    # Every upstream member needs the "server" directive; a bare
    # "host:port" line is a syntax error and nginx will not start.
    server orders:3003;
}

server {
    listen 80;
    server_name api.example.com;
    
    # Authentication service
    location /api/v1/auth {
        proxy_pass http://auth_service;
        # proxy_params is the distro-provided header file (Debian/Ubuntu:
        # /etc/nginx/proxy_params) — TODO confirm it exists on the target OS
        include proxy_params;
        
        # Rate limiting
        # NOTE(review): "zone=auth" must first be declared in the http{}
        # context, e.g.:
        #   limit_req_zone $binary_remote_addr zone=auth:10m rate=5r/s;
        # Without that declaration this directive fails at config load.
        limit_req zone=auth burst=20 nodelay;
        
        # CORS headers ('*' allows any origin — tighten for production)
        add_header 'Access-Control-Allow-Origin' '*' always;
        add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always;
    }
    
    # User service
    location /api/v1/users {
        proxy_pass http://user_service;
        include proxy_params;
        
        # JWT validation via subrequest — requires nginx built with
        # --with-http_auth_request_module
        auth_request /validate-jwt;
        auth_request_set $auth_status $upstream_status;
    }
    
    # Order service
    location /api/v1/orders {
        proxy_pass http://order_service;
        include proxy_params;
        
        # WebSocket support: HTTP/1.1 plus the Upgrade/Connection pair
        # lets the proxied connection switch protocols
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
    
    # JWT validation endpoint ("internal" blocks direct external access;
    # body is stripped because only headers matter for validation)
    location = /validate-jwt {
        internal;
        proxy_pass http://auth_service/validate;
        proxy_pass_request_body off;
        proxy_set_header Content-Length "";
        proxy_set_header X-Original-URI $request_uri;
    }
    
    # Health checks — answered locally, never proxied, never logged
    location /health {
        access_log off;
        return 200 "healthy\n";
        add_header Content-Type text/plain;
    }
}
                

WebSocket Proxy Configuration:

server {
    listen 80;
    server_name ws.example.com;
    
    location / {
        proxy_pass http://websocket-backend;
        
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        
        # WebSocket specific timeouts
        proxy_read_timeout 86400s;
        proxy_send_timeout 86400s;
        
        # Enable keepalive
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        
        # Buffer settings for WebSocket
        proxy_buffering off;
    }
}
                

Load Balancing Configuration

Load Balancing Methods:

# Round Robin (default)
upstream backend {
    server backend1.example.com;
    server backend2.example.com;
    server backend3.example.com;
}

# Least Connections
upstream backend {
    least_conn;
    server backend1.example.com;
    server backend2.example.com;
}

# IP Hash (Session Persistence)
upstream backend {
    ip_hash;
    server backend1.example.com;
    server backend2.example.com;
}

# Generic Hash
upstream backend {
    hash $request_uri consistent;
    server backend1.example.com;
    server backend2.example.com;
}

# Weighted Load Balancing
upstream backend {
    server backend1.example.com weight=3;
    server backend2.example.com weight=2;
    server backend3.example.com weight=1;
}
                

Advanced Load Balancing with Health Checks:

upstream app_servers {
    zone backend 64k;
    least_conn;
    
    server 10.0.1.10:8080 max_fails=3 fail_timeout=30s;
    server 10.0.1.11:8080 max_fails=3 fail_timeout=30s;
    server 10.0.1.12:8080 max_fails=3 fail_timeout=30s;
    
    # Backup servers
    server 10.0.2.10:8080 backup;
    server 10.0.2.11:8080 backup;
    
    # Keepalive connections to upstream
    keepalive 32;
}

server {
    listen 80;
    server_name app.example.com;

    location / {
        proxy_pass http://app_servers;

        # Active health checks (the health_check directive) are an
        # NGINX Plus feature — on open-source nginx this line prevents
        # the configuration from loading. OSS nginx relies on the
        # passive max_fails/fail_timeout checks in the upstream block.
        # health_check uri=/health interval=5s fails=3 passes=2;

        # Connection reuse with the upstream keepalive pool:
        # HTTP/1.1 plus an empty Connection header is required
        proxy_http_version 1.1;
        proxy_set_header Connection "";

        # Timeouts
        proxy_connect_timeout 5s;
        proxy_send_timeout 10s;
        proxy_read_timeout 30s;

        # Retry logic: fail over to the next upstream server on
        # errors, timeouts, and 5xx responses
        proxy_next_upstream error timeout http_500 http_502 http_503 http_504;
        proxy_next_upstream_tries 3;
        proxy_next_upstream_timeout 30s;
    }

    # Status page for the load balancer (internal networks only)
    location /nginx_status {
        stub_status on;
        access_log off;
        allow 10.0.0.0/8;
        deny all;
    }
}
                

Load Balancing with Docker:

# Dynamic upstream with Docker API
upstream docker_backend {
    zone docker_zone 64k;

    # NOTE(review): the "resolve" parameter (runtime DNS re-resolution of
    # an upstream server name) is an NGINX Plus feature; open-source nginx
    # resolves this hostname only once, at startup. On OSS nginx, place
    # the hostname in a variable used by proxy_pass together with a
    # "resolver" directive to get per-request resolution.
    server docker-backend:8080 resolve;
}

server {
    listen 80;
    server_name docker.example.com;

    # Resolver pointing at Docker's embedded DNS so names can be
    # re-resolved at request time
    resolver 127.0.0.11 valid=30s;

    location / {
        # Only ONE proxy_pass is allowed per location — the original
        # snippet declared two, which is a duplicate-directive error.
        # Putting the hostname in a variable defers DNS resolution to
        # request time (via the resolver above), so containers that
        # restart with new IPs are picked up without reloading nginx.
        set $upstream_backend docker-backend:8080;
        proxy_pass http://$upstream_backend;

        # Headers for Docker
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

# Using nginx-proxy (Docker container)
docker run -d -p 80:80 -v /var/run/docker.sock:/tmp/docker.sock:ro \
    -v /etc/nginx/conf.d \
    --name nginx-proxy \
    jwilder/nginx-proxy
                

SSL/TLS Configuration

Basic SSL Configuration:

server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name example.com www.example.com;
    
    # SSL certificate paths
    ssl_certificate /etc/ssl/certs/example.com.crt;
    ssl_certificate_key /etc/ssl/private/example.com.key;
    
    # SSL configuration
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512;
    ssl_prefer_server_ciphers off;
    
    # SSL session cache
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    ssl_session_tickets off;
    
    # OCSP stapling
    ssl_stapling on;
    ssl_stapling_verify on;
    ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt;
    
    # HSTS
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    
    # DH parameters (generate: openssl dhparam -out /etc/nginx/dhparam.pem 2048)
    ssl_dhparam /etc/nginx/dhparam.pem;
    
    # Enable SSL for specific URIs only
    location /secure {
        # ... configuration ...
    }
}
                

Auto SSL with Let's Encrypt:

# Main server block for ACME challenge
server {
    listen 80;
    server_name example.com www.example.com;

    # Serve Let's Encrypt HTTP-01 challenges over plain HTTP
    location /.well-known/acme-challenge/ {
        root /var/www/letsencrypt;
    }

    # Redirect all other HTTP traffic to HTTPS.
    # Use $host (the name the client actually requested), not
    # $server_name: $server_name always expands to the FIRST name in the
    # server_name list, so requests for www.example.com would be
    # silently redirected to example.com.
    location / {
        return 301 https://$host$request_uri;
    }
}

# HTTPS server block
server {
    listen 443 ssl http2;
    server_name example.com www.example.com;
    
    # Let's Encrypt certificates
    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
    
    # Include SSL configuration
    include /etc/nginx/ssl.conf;
    
    # Rest of configuration...
}

# SSL configuration file (ssl.conf)
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305';
ssl_prefer_server_ciphers off;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
ssl_session_tickets off;

# Auto-renewal script
#!/bin/bash
certbot renew --quiet --post-hook "systemctl reload nginx"
                

SSL Termination for Backend:

# SSL termination at Nginx, HTTP to backend
server {
    listen 443 ssl;
    server_name app.example.com;
    
    ssl_certificate /path/to/cert.pem;
    ssl_certificate_key /path/to/key.pem;
    
    location / {
        proxy_pass http://backend-server:8080;
        proxy_set_header X-Forwarded-Proto https;
        
        # SSL client certificate verification (mutual TLS)
        proxy_set_header X-SSL-Client-Cert $ssl_client_cert;
        proxy_set_header X-SSL-Client-Verify $ssl_client_verify;
        proxy_set_header X-SSL-Client-S-DN $ssl_client_s_dn;
    }
}

# SSL passthrough (TCP load balancing)
stream {
    upstream ssl_backend {
        server backend1:443;
        server backend2:443;
    }
    
    server {
        listen 443;
        proxy_pass ssl_backend;
        proxy_ssl on;
        proxy_ssl_verify on;
        proxy_ssl_trusted_certificate /etc/nginx/ssl/trusted_ca.crt;
    }
}
                

Caching Strategies

Proxy Cache Configuration:

# Define cache path and parameters
http {
    # levels=1:2 spreads cached files across two directory levels;
    # keys_zone sizes the shared-memory key index (10m ≈ ~80k keys);
    # inactive=60m evicts entries unused for an hour regardless of freshness
    proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my_cache:10m 
                     max_size=10g inactive=60m use_temp_path=off;
    
    # Cache bypass conditions: never serve cached responses to
    # state-changing methods
    map $request_method $skip_cache {
        default     0;
        POST        1;
        PUT         1;
        DELETE      1;
        PATCH       1;
    }
    
    server {
        location / {
            proxy_pass http://backend;
            
            # Cache configuration
            proxy_cache my_cache;
            proxy_cache_key "$scheme$request_method$host$request_uri$is_args$args";
            proxy_cache_valid 200 302 10m;
            proxy_cache_valid 404 1m;
            proxy_cache_valid any 5m;
            
            # Bypass cache: skip lookup AND skip storing for the mapped methods
            proxy_cache_bypass $skip_cache;
            proxy_no_cache $skip_cache;
            
            # Cache locks for stampede protection — only one request
            # populates a missing entry; others wait up to 5s
            proxy_cache_lock on;
            proxy_cache_lock_timeout 5s;
            
            # Cache revalidation with conditional requests; serve stale
            # content while updating or when the backend errors out
            proxy_cache_revalidate on;
            proxy_cache_use_stale error timeout updating 
                                   http_500 http_502 http_503 http_504;
            
            # Add cache status header (HIT/MISS/BYPASS/STALE/…)
            add_header X-Cache-Status $upstream_cache_status;
            
            # Cache purge endpoint (secured)
            # NOTE(review): proxy_cache_purge is an NGINX Plus directive
            # (OSS needs the third-party ngx_cache_purge module) — stock
            # open-source nginx will not load this block as-is.
            location ~ /purge(/.*) {
                allow 10.0.0.0/8;
                deny all;
                proxy_cache_purge my_cache "$scheme$request_method$host$1$is_args$args";
            }
        }
    }
}
                

Microservices Cache Strategy:

# Different cache policies per service
map $uri $cache_zone {
    ~^/api/v1/products   product_cache;
    ~^/api/v1/users      user_cache;
    ~^/api/v1/orders     order_cache;
    default              no_cache;
}

map $uri $cache_time {
    ~^/api/v1/products   30m;
    ~^/api/v1/users      5m;
    ~^/api/v1/orders     1m;
    default              0;
}

upstream api_gateway {
    server api-gateway:8080;
}

server {
    location /api/v1/ {
        proxy_pass http://api_gateway;
        
        # Dynamic cache selection
        proxy_cache $cache_zone;
        proxy_cache_valid 200 $cache_time;
        
        # Vary cache by Authorization header
        proxy_cache_key "$scheme$request_method$host$request_uri$http_authorization";
        
        # Cache conditions
        proxy_cache_methods GET HEAD;
        proxy_cache_min_uses 2;
        
        # Edge side includes (ESI)
        ssi on;
        proxy_set_header Accept-Encoding "";
    }
}
                

Security Hardening

Basic Security Configuration:

# /etc/nginx/nginx.conf security settings
server_tokens off;  # Hide Nginx version in headers and error pages
# NOTE(review): more_clear_headers is provided by the third-party
# headers-more-nginx-module — stock nginx fails to load this directive.
more_clear_headers Server;  # Remove Server header entirely

# Security headers ("always" emits them on error responses too)
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
# X-XSS-Protection is deprecated/ignored by modern browsers; kept only
# for legacy clients — the CSP header below is the effective control
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
add_header Content-Security-Policy "default-src 'self';" always;

# Request limiting: zones are declared here, then applied per-location
# with limit_req / limit_conn
limit_req_zone $binary_remote_addr zone=one:10m rate=10r/s;
limit_conn_zone $binary_remote_addr zone=addr:10m;

# File upload limits
client_max_body_size 10m;
client_body_buffer_size 128k;
client_body_timeout 60s;

# Hide PHP files (this nginx does not execute PHP, so refuse outright)
location ~ \.php$ {
    deny all;
}

# Protect sensitive files (.htaccess, VCS metadata)
location ~ /\.(ht|git|svn) {
    deny all;
}
                

Rate Limiting and DDoS Protection:

# Rate limiting zones
limit_req_zone $binary_remote_addr zone=login:10m rate=5r/m;
limit_req_zone $binary_remote_addr zone=api:10m rate=100r/s;
limit_req_zone $binary_remote_addr zone=static:10m rate=500r/s;

# GeoIP blocking
# NOTE: the plain "geo" directive maps client IP addresses/CIDR ranges to
# values — it does not understand country codes, so "CN 1;" inside a geo
# block is invalid. Country blocking needs the GeoIP module
# (--with-http_geoip_module) plus a country database, which sets
# $geoip_country_code for each request.
geoip_country /usr/share/GeoIP/GeoIP.dat;

map $geoip_country_code $blocked_country {
    default 0;
    # Block specific countries
    CN 1;
    RU 1;
    KP 1;
}

# Map for blocking
map $blocked_country $block_access {
    0 "";
    1 "Blocked";
}

server {
    listen 80;
    
    # Country blocking
    if ($block_access = "Blocked") {
        return 403;
    }
    
    # Login page rate limiting
    location /login {
        limit_req zone=login burst=3 nodelay;
        limit_req_status 429;
        
        # Additional security
        limit_conn addr 10;
        
        proxy_pass http://backend;
    }
    
    # API rate limiting
    location /api/ {
        limit_req zone=api burst=50 nodelay;
        limit_req_status 429;
        
        # Per IP connection limit
        limit_conn addr 100;
        
        proxy_pass http://backend;
    }
    
    # Static files - higher limits
    location /static/ {
        limit_req zone=static burst=200 nodelay;
        
        # Cache static files
        expires 1y;
        add_header Cache-Control "public, immutable";
    }
    
    # Bot protection
    location / {
        # Block bad user agents
        if ($http_user_agent ~* (wget|curl|libwww-perl|python|nikto|wikto|scan|java|winhttp|HTTrack|clshttp|archiver|loader|email|harvest|extract|grab|miner|sqlmap)) {
            return 403;
        }
        
        proxy_pass http://backend;
    }
}
                

WAF-like Protection with Nginx:

# SQL injection protection (coarse regex filtering — a real WAF such as
# ModSecurity or NAXSI is strongly preferred; "~*" makes the match
# case-insensitive so UNION/SELECT variants are caught)
set $block_sql_injection 0;
if ($query_string ~* "union.*select.*\(") {
    set $block_sql_injection 1;
}
if ($query_string ~* "concat.*\(") {
    set $block_sql_injection 1;
}

# XSS protection: match a <script> tag in raw or URL-encoded form.
# The pattern must never be empty — an empty regex matches EVERY
# request (the original 'if ($query_string ~ "")' blocked all traffic
# carrying a query string).
set $block_xss 0;
if ($query_string ~* "(<|%3C).*script.*(>|%3E)") {
    set $block_xss 1;
}

# Directory traversal
set $block_traversal 0;
if ($query_string ~ "\.\./") {
    set $block_traversal 1;
}

# Apply blocking
if ($block_sql_injection = 1) {
    return 403;
}
if ($block_xss = 1) {
    return 403;
}
if ($block_traversal = 1) {
    return 403;
}

# Additional security checks
location ~* \.(php|asp|aspx|jsp|pl)$ {
    deny all;
}

# Block suspicious request methods
if ($request_method !~ ^(GET|HEAD|POST)$) {
    return 405;
}

# Block large request body sizes
client_max_body_size 10M;
client_body_buffer_size 128k;

# Timeouts to prevent slowloris attacks
client_body_timeout 10s;
client_header_timeout 10s;
send_timeout 10s;
keepalive_timeout 5s 5s;
                

Performance Optimization

Connection Optimization:

# http context optimizations
http {
    # Connection handling
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    
    # Keepalive settings
    keepalive_timeout 30s;
    keepalive_requests 100;
    
    # Buffer optimizations
    client_body_buffer_size 16k;
    client_header_buffer_size 1k;
    large_client_header_buffers 4 8k;
    output_buffers 4 32k;
    postpone_output 1460;
    
    # Timeouts
    client_body_timeout 12s;
    client_header_timeout 12s;
    send_timeout 10s;
    
    # File descriptors
    worker_rlimit_nofile 65535;
    
    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_min_length 256;
    gzip_types
        application/atom+xml
        application/javascript
        application/json
        application/ld+json
        application/manifest+json
        application/rss+xml
        application/vnd.geo+json
        application/vnd.ms-fontobject
        application/x-font-ttf
        application/x-web-app-manifest+json
        application/xhtml+xml
        application/xml
        font/opentype
        image/bmp
        image/svg+xml
        image/x-icon
        text/cache-manifest
        text/css
        text/plain
        text/vcard
        text/vnd.rim.location.xloc
        text/vtt
        text/x-component
        text/x-cross-domain-policy;
    
    # Brotli compression (if module installed)
    brotli on;
    brotli_comp_level 6;
    brotli_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
}
                

Static File Optimization:

server {
    location ~* \.(jpg|jpeg|png|gif|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm|htc)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
        
        # Enable gzip for SVG
        gzip_static on;
        
        # No access log for static files
        access_log off;
        
        # Try to serve compressed version first
        gzip_vary on;
    }
    
    location ~* \.(css|js)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
        
        # Brotli compression
        brotli_static on;
        
        # Source maps
        location ~* \.js\.map$ {
            expires 1y;
            add_header Cache-Control "public";
        }
    }
    
    # Web font optimization
    location ~* \.(woff|woff2|ttf|eot)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
        add_header Access-Control-Allow-Origin "*";
    }
    
    # Video streaming optimization
    location ~* \.(mp4|webm)$ {
        mp4;
        mp4_buffer_size 1M;
        mp4_max_buffer_size 5M;
        
        # Range requests for video streaming
        add_header Accept-Ranges bytes;
    }
}
                

Docker Configuration

Nginx Dockerfile:

# Multi-stage build for optimized Nginx
FROM nginx:1.23-alpine AS builder

# Install build dependencies
RUN apk add --no-cache --virtual .build-deps \
    gcc \
    libc-dev \
    make \
    openssl-dev \
    pcre-dev \
    zlib-dev \
    linux-headers \
    curl \
    gnupg \
    libxslt-dev \
    gd-dev \
    geoip-dev

# Download the nginx source matching the base image version
RUN curl -fSL https://nginx.org/download/nginx-1.23.3.tar.gz -o nginx.tar.gz \
    && mkdir -p /usr/src/nginx \
    && tar -xzf nginx.tar.gz -C /usr/src/nginx --strip-components=1 \
    && rm nginx.tar.gz

# Compile with additional modules.
# NOTE: --with-http_v3_module does not exist in nginx 1.23.x (HTTP/3
# support landed in 1.25), and --with-http_perl_module would require
# perl/perl-dev which are not installed — both flags are omitted so
# ./configure succeeds.
WORKDIR /usr/src/nginx
RUN ./configure \
    --prefix=/etc/nginx \
    --sbin-path=/usr/sbin/nginx \
    --modules-path=/usr/lib/nginx/modules \
    --conf-path=/etc/nginx/nginx.conf \
    --error-log-path=/var/log/nginx/error.log \
    --http-log-path=/var/log/nginx/access.log \
    --pid-path=/var/run/nginx.pid \
    --lock-path=/var/run/nginx.lock \
    --http-client-body-temp-path=/var/cache/nginx/client_temp \
    --http-proxy-temp-path=/var/cache/nginx/proxy_temp \
    --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \
    --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \
    --http-scgi-temp-path=/var/cache/nginx/scgi_temp \
    --with-compat \
    --with-http_ssl_module \
    --with-http_realip_module \
    --with-http_addition_module \
    --with-http_sub_module \
    --with-http_dav_module \
    --with-http_flv_module \
    --with-http_mp4_module \
    --with-http_gunzip_module \
    --with-http_gzip_static_module \
    --with-http_random_index_module \
    --with-http_secure_link_module \
    --with-http_stub_status_module \
    --with-http_auth_request_module \
    --with-http_xslt_module \
    --with-http_image_filter_module \
    --with-http_geoip_module \
    --with-threads \
    --with-stream \
    --with-stream_ssl_module \
    --with-stream_realip_module \
    --with-stream_geoip_module \
    --with-http_slice_module \
    --with-mail \
    --with-mail_ssl_module \
    --with-file-aio \
    --with-http_v2_module \
    && make -j$(nproc) \
    && make install

FROM alpine:3.17

# Runtime libraries. libxslt and gd are required because the xslt and
# image_filter modules above are compiled in statically and the binary
# links against them.
RUN apk add --no-cache \
    ca-certificates \
    openssl \
    pcre \
    zlib \
    geoip \
    libxslt \
    gd

# Copy compiled nginx
COPY --from=builder /usr/sbin/nginx /usr/sbin/nginx
COPY --from=builder /etc/nginx /etc/nginx
COPY --from=builder /usr/lib/nginx /usr/lib/nginx

# Create nginx user
RUN addgroup -S nginx && adduser -S -G nginx nginx

# Create necessary directories the unprivileged worker must write to
RUN mkdir -p /var/cache/nginx /var/log/nginx \
    && chown -R nginx:nginx /var/cache/nginx /var/log/nginx \
    && chmod -R 755 /var/cache/nginx /var/log/nginx

# Copy configuration
COPY nginx.conf /etc/nginx/nginx.conf
COPY conf.d/ /etc/nginx/conf.d/

# Health check — probe a port this image actually serves (the original
# probed :8080, which is neither exposed nor configured). Adjust the
# path to a location defined in nginx.conf.
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD wget -q -O- http://127.0.0.1/health || exit 1

EXPOSE 80 443

STOPSIGNAL SIGQUIT

# NOTE(review): a non-root user cannot bind ports < 1024 — either listen
# on 8080/8443 in nginx.conf, grant CAP_NET_BIND_SERVICE at run time, or
# drop this USER directive. Confirm the intended deployment model.
USER nginx

CMD ["nginx", "-g", "daemon off;"]
                

Docker Compose Configuration:

version: '3.8'

services:
  nginx:
    build: .
    container_name: nginx-proxy
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./sites-enabled:/etc/nginx/sites-enabled
      - ./ssl:/etc/nginx/ssl
      - ./logs:/var/log/nginx
      - ./html:/usr/share/nginx/html
    environment:
      - NGINX_HOST=example.com
      - NGINX_PORT=80
      - TZ=UTC
    networks:
      - frontend
      - backend
    depends_on:
      - app1
      - app2
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 512M
        reservations:
          memory: 256M

  app1:
    image: node:18-alpine
    container_name: app1
    environment:
      - NODE_ENV=production
    volumes:
      - ./app:/app
    working_dir: /app
    command: node server.js
    networks:
      - backend
    expose:
      - "3000"

  app2:
    image: node:18-alpine
    container_name: app2
    environment:
      - NODE_ENV=production
    volumes:
      - ./app:/app
    working_dir: /app
    command: node server.js
    networks:
      - backend
    expose:
      - "3000"

networks:
  frontend:
    driver: bridge
  backend:
    driver: bridge
    internal: true  # Backend network not accessible from host
                

Kubernetes Ingress Configuration

Nginx Ingress Controller:

# Nginx Ingress Controller Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-ingress
  template:
    metadata:
      labels:
        app: nginx-ingress
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "10254"
    spec:
      serviceAccountName: nginx-ingress-serviceaccount
      containers:
      - name: nginx-ingress-controller
        image: k8s.gcr.io/ingress-nginx/controller:v1.5.1
        args:
          - /nginx-ingress-controller
          - --configmap=$(POD_NAMESPACE)/nginx-configuration
          - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
          - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
          - --publish-service=$(POD_NAMESPACE)/ingress-nginx
          - --annotations-prefix=nginx.ingress.kubernetes.io
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - name: http
          containerPort: 80
        - name: https
          containerPort: 443
        - name: metrics
          containerPort: 10254
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
        resources:
          requests:
            cpu: 100m
            memory: 90Mi
---
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local
  ports:
  - name: http
    port: 80
    targetPort: http
    protocol: TCP
  - name: https
    port: 443
    targetPort: https
    protocol: TCP
  selector:
    app: nginx-ingress
                

Ingress Resource Examples:

# Basic Ingress
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: basic-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: myapp.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: myapp-service
            port:
              number: 80

# TLS Ingress
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: tls-ingress
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
  tls:
  - hosts:
    - myapp.example.com
    secretName: tls-secret
  rules:
  - host: myapp.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: myapp-service
            port:
              number: 80

# Path-based Routing
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: path-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  rules:
  - host: myapp.example.com
    http:
      paths:
      - path: /api(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: api-service
            port:
              number: 8080
      - path: /admin(/|$)(.*)
        pathType: Prefix
        backend:
          service:
            name: admin-service
            port:
              number: 8081
      - path: /
        pathType: Prefix
        backend:
          service:
            name: web-service
            port:
              number: 80

# Rate Limiting Ingress
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: rate-limited-ingress
  annotations:
    nginx.ingress.kubernetes.io/limit-rps: "100"
    nginx.ingress.kubernetes.io/limit-burst: "200"
    nginx.ingress.kubernetes.io/limit-whitelist: "10.0.0.0/8"
spec:
  rules:
  - host: api.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: api-service
            port:
              number: 8080
                

Monitoring & Logging

Stub Status Module:

# Enable status page
server {
    location /nginx_status {
        stub_status on;
        access_log off;
        allow 127.0.0.1;
        allow 10.0.0.0/8;
        deny all;
    }
}

# Sample output:
# Active connections: 291
# server accepts handled requests
#  16630948 16630948 31070465
# Reading: 6 Writing: 179 Waiting: 106

# Prometheus metrics with nginx-exporter
location /metrics {
    stub_status on;
    access_log off;
    allow 127.0.0.1;
    deny all;
}

# Custom log formats for analytics
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                '$status $body_bytes_sent "$http_referer" '
                '"$http_user_agent" "$http_x_forwarded_for" '
                '$request_time $upstream_response_time '
                '$upstream_addr $upstream_status';

log_format json_analytics escape=json
    '{'
    '"time_local":"$time_local",'
    '"remote_addr":"$remote_addr",'
    '"remote_user":"$remote_user",'
    '"request":"$request",'
    '"status":$status,'
    '"body_bytes_sent":$body_bytes_sent,'
    '"request_time":$request_time,'
    '"http_referer":"$http_referer",'
    '"http_user_agent":"$http_user_agent",'
    '"http_x_forwarded_for":"$http_x_forwarded_for"'
    '}';

# Access log with JSON format
access_log /var/log/nginx/access.log json_analytics;
                

Real-time Monitoring Script:

#!/bin/bash
# nginx-monitor.sh — poll the stub_status endpoint and print a
# lightweight live dashboard every $INTERVAL seconds.

INTERVAL=5  # seconds between samples
STATUS_URL="http://localhost/nginx_status"

monitor_nginx() {
    # req_per_sec starts at 0 so the first iteration prints a number
    # instead of an empty string (the original left it unset).
    local last_requests="" req_per_sec=0
    local status active accepts handled requests reading writing waiting

    while true; do
        clear
        echo "=== Nginx Monitoring Dashboard ==="
        echo "Time: $(date)"
        echo

        # Fetch stub_status; skip this cycle cleanly if nginx is unreachable
        if ! status=$(curl -fs "$STATUS_URL"); then
            echo "ERROR: cannot reach $STATUS_URL"
            sleep "$INTERVAL"
            continue
        fi

        # Parse the fixed stub_status layout:
        #   line 1: "Active connections: N"
        #   line 3: "accepts handled requests" counters
        #   line 4: "Reading: A Writing: B Waiting: C"
        active=$(echo "$status"   | awk 'NR==1 {print $3}')
        accepts=$(echo "$status"  | awk 'NR==3 {print $1}')
        handled=$(echo "$status"  | awk 'NR==3 {print $2}')
        requests=$(echo "$status" | awk 'NR==3 {print $3}')
        reading=$(echo "$status"  | awk 'NR==4 {print $2}')
        writing=$(echo "$status"  | awk 'NR==4 {print $4}')
        waiting=$(echo "$status"  | awk 'NR==4 {print $6}')

        # Request rate since the previous sample (stays 0 on first pass)
        if [ -n "$last_requests" ]; then
            req_per_sec=$(( (requests - last_requests) / INTERVAL ))
        fi
        last_requests=$requests

        # Display metrics
        echo "Active Connections: $active"
        echo "Requests/sec: $req_per_sec"
        echo "Reading: $reading  Writing: $writing  Waiting: $waiting"
        echo "Total Requests: $requests"
        echo

        # Show the tail of the error log only when it has content.
        # (The original counted lines of "tail -5", which is non-zero for
        # ANY non-empty log, not just recent errors.)
        if [ -s /var/log/nginx/error.log ]; then
            echo "Recent Errors:"
            tail -3 /var/log/nginx/error.log
        fi

        sleep "$INTERVAL"
    done
}

# Start monitoring
monitor_nginx
                

Real-World Examples

Microservices API Gateway:

# API Gateway configuration — one upstream group per microservice.
upstream auth_service {
    server auth-service:3001;
}

upstream user_service {
    server user-service:3002;
}

upstream product_service {
    server product-service:3003;
}

upstream order_service {
    server order-service:3004;
}

server {
    listen 80;
    server_name api.company.com;

    # CORS headers; "always" ensures they are also sent on 4xx/5xx responses.
    add_header 'Access-Control-Allow-Origin' '*' always;
    add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always;
    add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type' always;
    add_header 'Access-Control-Expose-Headers' 'X-Total-Count' always;

    # Authentication endpoint
    location /auth/ {
        proxy_pass http://auth_service/;

        # Tight rate limiting for login/token endpoints
        # (zone "auth" is declared in the http context below).
        limit_req zone=auth burst=5 nodelay;

        # Separate structured log for auth traffic.
        access_log /var/log/nginx/auth_access.log json_analytics;
    }

    # User management (JWT-protected via the auth_request subrequest)
    location /users/ {
        auth_request /validate;
        auth_request_set $auth_status $upstream_status;
        # FIX: $auth_user_id was forwarded below but never set. Capture it
        # from the auth service's X-User-ID response header
        # (presumably the header the auth service returns — confirm).
        auth_request_set $auth_user_id $upstream_http_x_user_id;

        proxy_pass http://user_service/;
        proxy_set_header X-User-ID $auth_user_id;
    }

    # Products
    location /products/ {
        proxy_pass http://product_service/;

        # Cache product responses for 5 minutes.
        # NOTE: requires a matching "proxy_cache_path ... keys_zone=product_cache"
        # declaration in the http context.
        proxy_cache product_cache;
        proxy_cache_valid 200 5m;

        # Cache key includes query params so variants are cached separately.
        proxy_cache_key "$scheme$request_method$host$request_uri";
    }

    # Orders
    location /orders/ {
        auth_request /validate;

        proxy_pass http://order_service/;

        # WebSocket support for real-time order updates.
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    # JWT validation endpoint (internal subrequest target only)
    location = /validate {
        internal;
        proxy_pass http://auth_service/validate;
        # auth_request subrequests must not carry a body.
        proxy_pass_request_body off;
        proxy_set_header Content-Length "";
        proxy_set_header X-Original-URI $request_uri;
    }

    # Health check endpoint
    # FIX: the original combined proxy_pass, the NGINX-Plus-only
    # health_check directive, and "return 200" — "return" always wins,
    # leaving the rest as dead (and, for health_check, invalid) config.
    # Serve a static health response directly instead.
    location /health {
        access_log off;
        default_type application/json;
        return 200 '{"status": "healthy", "timestamp": "$time_local"}';
    }

    # API documentation
    location /docs {
        proxy_pass http://docs-service:8080;
    }
}

# Rate limiting zones (must be declared in the http context).
limit_req_zone $binary_remote_addr zone=auth:10m rate=10r/m;
limit_req_zone $binary_remote_addr zone=api:10m rate=100r/s;

Production-Ready Configuration Template:

# /etc/nginx/nginx.conf — production-ready baseline.
user nginx;
worker_processes auto;              # one worker per CPU core
worker_rlimit_nofile 65535;         # raise fd limit for high concurrency
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 4096;
    multi_accept on;                # accept as many connections as possible per event
    use epoll;                      # Linux event notification method
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Logging
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    # Buffered access log: fewer disk writes under load.
    access_log /var/log/nginx/access.log main buffer=32k flush=5s;

    # Performance
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    keepalive_requests 1000;
    types_hash_max_size 2048;
    server_tokens off;              # hide the nginx version in responses
    client_max_body_size 100M;

    # SSL / TLS defaults
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
    ssl_prefer_server_ciphers off;  # let modern clients pick (TLS 1.3 style)
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    ssl_session_tickets off;        # tickets weaken forward secrecy if keys aren't rotated

    # Gzip
    gzip on;
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_min_length 256;
    gzip_types text/plain text/css text/xml text/javascript 
               application/json application/javascript application/xml+rss 
               application/atom+xml;

    # Rate limiting zones
    limit_req_zone $binary_remote_addr zone=one:10m rate=10r/s;
    limit_req_zone $binary_remote_addr zone=api:10m rate=100r/s;

    # Proxy cache
    proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my_cache:10m 
                     max_size=10g inactive=60m use_temp_path=off;

    # Include per-site/per-app configurations
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}

# TCP load balancing (stream context): TLS passthrough to the backends.
stream {
    upstream backend {
        server backend1.example.com:443;
        server backend2.example.com:443;
    }

    server {
        listen 443;
        proxy_pass backend;
        # FIX: "proxy_ssl on" removed. This server accepts raw TCP (no
        # "ssl" on the listen directive), so clients' TLS bytes are passed
        # through untouched; enabling proxy_ssl would wrap the client's
        # TLS handshake inside a second TLS session and break the protocol.
    }
}
                

Essential Nginx Commands

# Validate configuration syntax before applying it
nginx -t
nginx -T  # Test and show full config  (validate AND dump the effective, fully-merged config)

# Reload configuration (graceful — workers finish in-flight requests)
nginx -s reload
systemctl reload nginx

# Stop Nginx (fast shutdown)
nginx -s stop
systemctl stop nginx

# Start Nginx
systemctl start nginx

# Check version and compile options (shows which modules are built in)
nginx -V

# Check running processes (one master + N workers expected)
ps aux | grep nginx

# Monitor access logs in real-time
tail -f /var/log/nginx/access.log

# Check error logs
tail -f /var/log/nginx/error.log

# Test upstream servers directly (headers only, bypassing the proxy)
curl -I http://upstream-server:port

# Check SSL certificate (-servername sends SNI for name-based vhosts)
openssl s_client -connect example.com:443 -servername example.com
        

Additional Resources

© 2025 Complete Nginx Configuration Guide. All rights reserved.

Comments

Popular posts from this blog

Real-world Terraform scenarios to test and improve your Infrastructure as Code skills

Azure Kubernetes Service (AKS) Complete Guide

Automate Your DevOps Documentation: `iac-to-docs` Lands on PyPI with AI Power