Nginx configuration details

1. Basic Nginx operations

# Installation location: /usr/local/webserver/nginx
# View version
/usr/local/webserver/nginx/sbin/nginx -v
# Create the www user and group for Nginx:
/usr/sbin/groupadd www 
/usr/sbin/useradd -g www www
# Other operations
/usr/local/webserver/nginx/sbin/nginx -s reload            # reload the configuration file
/usr/local/webserver/nginx/sbin/nginx -s reopen            # reopen the log files
/usr/local/webserver/nginx/sbin/nginx -s stop              # stop Nginx

2. Nginx configuration file

The configuration file consists of three parts: 1. Global block  2. Events block  3. HTTP block

# 1. Global blocks
#user nobody;
user www www;          # run worker processes as user www, group www
worker_processes  2;   # set to match the number of CPU cores

#error_log logs/error.log;
error_log /usr/local/webserver/nginx/logs/nginx_error.log crit; # Log location and log level
#error_log logs/error.log notice;
#error_log logs/error.log info;

#pid logs/nginx.pid;
pid /usr/local/webserver/nginx/nginx.pid;
#Specifies the value for maximum file descriptors that can be opened by this process.
worker_rlimit_nofile 65535;

# 2. Events block: mainly affects the network connections between Nginx and its clients
events {
    use epoll;
    worker_connections  1024;  # maximum number of connections per worker process
}

# 3. HTTP block
http {
    # 3.1 HTTP global block: mime.types, custom log format, timeouts
    include       mime.types;
    default_type  application/octet-stream;
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" $http_x_forwarded_for';
    #log_format main '$remote_addr - $remote_user [$time_local] "$request" '
    # '$status $body_bytes_sent "$http_referer" '
    # '"$http_user_agent" "$http_x_forwarded_for"';

    #access_log logs/access.log main;
    server_names_hash_bucket_size 128;
    client_header_buffer_size 32k;
    large_client_header_buffers 4 32k;
    client_max_body_size 8m;

    sendfile        on;
    #tcp_nopush on;
    tcp_nopush on;
    keepalive_timeout 60;
    tcp_nodelay on;
    fastcgi_connect_timeout 300;
    fastcgi_send_timeout 300;
    fastcgi_read_timeout 300;
    fastcgi_buffer_size 64k;
    fastcgi_buffers 4 64k;
    fastcgi_busy_buffers_size 128k;
    fastcgi_temp_file_write_size 128k;
    gzip on;
    gzip_min_length 1k;
    gzip_buffers 4 16k;
    gzip_http_version 1.0;
    gzip_comp_level 2;
    gzip_types text/plain application/x-javascript text/css application/xml;
    gzip_vary on;

    #keepalive_timeout 0;
    #keepalive_timeout 65;

    #gzip on;
    #limit_zone crawler $binary_remote_addr 10m;
    
    # 3.2 HTTP server block
    # The following configures a server virtual host
    server {
        listen       9090; # Listening port
        server_name  localhost; # the domain name

        index index.html index.htm index.php index.jsp; 
        root /usr/local/webserver/nginx/html; # Site directory

        #charset koi8-r;

        #access_log logs/host.access.log main;

        location / {
            root   html;
            index  index.html index.htm;
        }

        #error_page 404 /404.html;

        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }

        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
        #
        #location ~ \.php$ {
        # proxy_pass http://127.0.0.1;
        #}

        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
        #
        #location ~ \.php$ {
        # root html;
        # fastcgi_pass 127.0.0.1:9000;
        # fastcgi_index index.php;
        # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
        # include fastcgi_params;
        #}

        # deny access to .htaccess files, if Apache's document root
        # concurs with nginx's one
        #
        #location ~ /\.ht {
        # deny all;
        #}
        access_log off;
    }


    # another virtual host using mix of IP-, name-, and port-based configuration
    #
    #server {
    # listen 8000;
    # listen somename:8080;
    # server_name somename alias another.alias;

    # location / {
    # root html;
    # index index.html index.htm;
    #}
    #}


    # HTTPS server
    #
    #server {
    # listen 443 ssl;
    # server_name localhost;

    # ssl_certificate cert.pem;
    # ssl_certificate_key cert.key;

    # ssl_session_cache shared:SSL:1m;
    # ssl_session_timeout 5m;

    # ssl_ciphers HIGH:!aNULL:!MD5;
    # ssl_prefer_server_ciphers on;

    # location / {
    # root html;
    # index index.html index.htm;
    #}
    #}

}

# Test the configuration file for syntax errors
[root@master conf]# /usr/local/webserver/nginx/sbin/nginx -t
# start
[root@master conf]# /usr/local/webserver/nginx/sbin/nginx
# Access the server IP in a browser to verify

3. Configure the reverse proxy on Nginx

# Goal: entering the Nginx address shows the Tomcat home page
# Implementation:
# 1. Install Tomcat and start it: ./startup.sh
# 2. Configure the reverse proxy

# Simple reverse proxy
server {
    location / {
        root html;
        proxy_pass http://<intranet IP>:8080;   # reverse proxy
        index index.html index.htm;
    }
}

# Decide which back end to use based on the request path
server {
    listen 9001;
    server_name <server IP>;
    location /ms01/ {
        proxy_pass http://172.26.245.104:8080;
    }
    location /ms02/ {
        proxy_pass http://172.26.245.104:8090;
    }
}
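A quick way to check the path-based routing above, assuming both Tomcat instances are reachable on 172.26.245.104: because proxy_pass has no URI part, the /ms01/ and /ms02/ prefixes are forwarded unchanged, so each back end should have a matching application context.

# Forwarded to the Tomcat on port 8080
curl http://<server IP>:9001/ms01/
# Forwarded to the Tomcat on port 8090
curl http://<server IP>:9001/ms02/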

4. Configure load balancing on Nginx

# Load balancing:
# 1. Round robin (default): requests are distributed to the back ends in turn
# 2. Weight: add weight=10 after a server entry; back ends with a higher weight receive more requests (see the sketch after the configuration below)
# 3. ip_hash: add ip_hash inside the upstream block so that requests from the same client IP always reach the same server, which avoids the session-sharing problem
# 4. fair (third-party module): add fair inside the upstream block; requests are assigned according to back-end response time

# The configuration goes inside the http {} block
upstream myserver {
    server 172.26.245.104:8080;
    server 172.26.245.104:8090;
}
server {
    listen 9090;                    # listening port
    server_name 172.26.245.104;     # the domain name

    index index.html index.htm index.php;
    root /usr/local/webserver/nginx/html;   # site directory
    location / {
        root   html;
        proxy_pass http://myserver;   # forward to the custom upstream
        index  index.html index.htm;
    }
}
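A minimal sketch of the weight and ip_hash strategies described above, reusing the same two back ends; only one upstream block with a given name may exist, so pick one variant:

# Weighted round robin: the 8080 back end gets roughly twice the traffic
upstream myserver {
    server 172.26.245.104:8080 weight=10;
    server 172.26.245.104:8090 weight=5;
}

# ip_hash: requests from the same client IP stick to the same back end
upstream myserver {
    ip_hash;
    server 172.26.245.104:8080;
    server 172.26.245.104:8090;
}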

5. Configure dynamic/static separation on Nginx

# Static and dynamic separation
# Static requests (HTML/CSS/JS/images, ...) are served directly from the static resource server; dynamic requests are forwarded to Tomcat
# Preparation: place the static resources under /data/
# Serve the static content with location blocks
location /www/ {
    root   /data/;
    index  index.html index.htm;
}
location /image/ {
    root   /data/;
    autoindex on;
}
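A quick check, assuming these location blocks are added to the server listening on 9090 above and the files exist under /data/; with root, the location prefix is kept when resolving the file path.

# Served from /data/www/index.html
curl http://<server IP>:9090/www/index.html
# autoindex on lists the contents of /data/image/
curl http://<server IP>:9090/image/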

6. Nginx high availability

# If the single Nginx instance goes down, requests can no longer be served
# Solution: one master and one backup server
# Components: 1. Nginx  2. Keepalived  3. A virtual IP
# Install Nginx on both servers
# Install Keepalived
yum install keepalived -y
# Verify the installation
rpm -qa keepalived
# Edit /etc/keepalived/keepalived.conf
! Configuration File for keepalived
# global
global_defs {      # Global definition
   notification_email {
     [email protected]
     [email protected]
     [email protected]
   }
   notification_email_from [email protected]
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL # node identifier (commonly the hostname or intranet IP)
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_script chk_http_port {                 # health-check script
	script "/usr/local/src/nginx_check.sh"  # script to run
	interval 2                              # run the script every 2 seconds
	weight 2                                # adjust the priority based on the check result
}

vrrp_instance VI_1 {
    state MASTER                 # change MASTER to BACKUP on the backup server
    interface eth0               # network interface
    virtual_router_id 51         # must be identical on master and backup
    priority 100                 # master gets a higher value than the backup (e.g. 100 vs 90)
    advert_int 1                 # heartbeat interval in seconds
    authentication {             # authentication between master and backup
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.200.16           # VRRP virtual address
        # 192.168.200.17
        # 192.168.200.18
    }
}

# /usr/local/src/nginx_check.sh
#!/bin/bash
A=`ps -C nginx --no-headers | wc -l`   # number of running nginx processes
if [ $A -eq 0 ];then
	/usr/local/webserver/nginx/sbin/nginx   # try to start nginx
	sleep 2
	if [ `ps -C nginx --no-headers | wc -l` -eq 0 ];then
		# nginx could not be started: stop keepalived so the backup takes over
		killall keepalived
	fi
fi
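The script must exist at the path referenced in the keepalived configuration and be executable on both servers:

chmod +x /usr/local/src/nginx_check.sh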

# Start both Nginx and Keepalived
# Nginx:
./nginx
# Keepalived:
systemctl start keepalived

Nginx principle

advantages

  1. Hot deployment: the configuration can be reloaded without downtime using nginx -s reload
  2. Each worker is an independent process, so a single worker crashing does not take the whole server down

Questions

  1. How many workers is appropriate to set?

Setting the number of workers equal to the number of CPU cores is most appropriate.
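A minimal sketch; worker_processes auto lets Nginx choose the value from the detected number of CPU cores:

# In the global block of nginx.conf
worker_processes auto;   # one worker per CPU core, detected automatically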

  2. How many connections (worker_connections) does a request occupy?

A request for a static resource occupies 2 of a worker's connections; a request that must be proxied to a dynamic back end (e.g. Tomcat) occupies 4.

  3. What is the maximum number of concurrent requests supported?

With one master and four workers, each supporting a maximum of 1024 connections: 4 * 1024 / 2 for static requests, or 4 * 1024 / 4 for dynamic requests.
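Worked out with the numbers above (4 workers, worker_connections 1024):

# Static requests:  4 * 1024 / 2 = 2048 concurrent requests
# Dynamic requests: 4 * 1024 / 4 = 1024 concurrent requests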