nginx download and installation
nginx download address: the official download page at nginx.org (http://nginx.org/en/download.html)
Install the compiler and dependency libraries:
yum install gcc gcc-c++ zlib-devel pcre-devel openssl-devel openssl-libs openssl -y
nginx installation:
In the extracted nginx source root directory, run ./configure, then make && make install.
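For reference, a complete build sequence might look like the following (the version number and download path are only illustrative; --prefix=/usr/local/nginx is the default and matches the paths used below):
cd /usr/local/src
wget http://nginx.org/download/nginx-1.20.2.tar.gz   # example version
tar -zxvf nginx-1.20.2.tar.gz
cd nginx-1.20.2
./configure --prefix=/usr/local/nginx
make && make install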
After the installation completes, you can add nginx to the PATH so that it can be run without the absolute path:
vim /etc/profile.d/http.sh
Add the following:
export PATH=/usr/local/nginx/sbin:$PATH
Apply the configuration:
source !$
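You can verify that the nginx binary is now found via the PATH:
nginx -v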
Start Nginx
Running nginx directly starts the service. nginx -s stop shuts it down and nginx -s reload reloads the configuration. If startup reports that the port is already in use, find and stop the process occupying it, or change the listening port in the /usr/local/nginx/conf/nginx.conf file.
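For example (ss and lsof may need to be installed separately):
nginx                  # start the service
nginx -t               # test the configuration
nginx -s reload        # reload the configuration
nginx -s stop          # stop the service
ss -lntp | grep ':80'  # find the process occupying port 80 (or: lsof -i:80)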
Visit Nginx
Enter http://ip:port in a browser. If the "Welcome to nginx!" page appears, the installation succeeded. If you cannot access it, first check whether the firewall blocks the corresponding port.
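For example, with firewalld on CentOS (assuming nginx listens on port 80), the port can be opened like this:
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --reload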
Simple explanation of the nginx configuration (the /usr/local/nginx/conf/nginx.conf file)
http {
load balancing (upstream) configuration;
server configuration;
}
#Number of worker processes; should be <= the number of CPU cores
worker_processes 1;
#Custom error log location; global setting, default logs/error.log
#error_log logs/error.log;
#Maximum number of simultaneous connections each worker process may handle (total connections = worker_connections x worker_processes); default 1024
events {
worker_connections 1024;
}
#
http {
#File extension and file type mapping table
include mime.types;
#Default file type
default_type application/octet-stream;
#Custom access log output format; global setting
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
#Custom access log location; global setting, default logs/access.log; syntax: file path + log format name
#access_log logs/access.log main;
#Enable sendfile
sendfile on;
#Keep-alive connection timeout
#keepalive_timeout 0;
keepalive_timeout 65;
#Enable gzip compression
#gzip on;
#Virtual host configuration; multiple server blocks can be defined, distinguished by domain name, IP, or port
server {
#Listening address; can be ip:port or just a port
listen 80;
#Server name; can be an IP or a domain name. server_name supports three matching methods: exact (www.domain.com), wildcard (*.domain.com, www.*), and regular expression (~^.+\.domain\.com$)
server_name localhost;
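#For illustration (hypothetical names), each matching style would look like:
#  server_name www.domain.com;       (exact)
#  server_name *.domain.com www.*;   (wildcard)
#  server_name ~^.+\.domain\.com$;   (regular expression)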
#Per-server access log; overrides the global setting for this server block
#access_log logs/host.access.log main;
#Error pages and the URI they are redirected to
error_page 500 502 503 504 /50x.html;
#location matching can be exact (= /index.html), prefix (/index), or regular expression (~ index); multiple location blocks may be configured
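#For illustration (hypothetical paths):
#  location = /index.html { ... }   exact match
#  location /static/ { ... }        prefix match
#  location ~ \.png$ { ... }        regex match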
location / {
#Document root; a relative path is resolved against /usr/local/nginx/
root html;
#Default index pages
index index.html index.htm;
}
#
location /html {
root html;
index index.html index.htm;
}
#
}
#
}
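As a quick sketch of how the /html location above resolves paths (assuming the default prefix /usr/local/nginx):
# request: GET /html/index.html
# root html;  => serves /usr/local/nginx/html/html/index.html
# (root prepends the document root to the full request URI, so the files for this location live in an html/ subdirectory of the document root)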
Nginx reverse proxy, dynamic/static separation, and load balancing
#user nobody;
##Maximum number of worker processes, generally equal to the number of CPU cores
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
##My understanding: this controls concurrency
events {
worker_connections 1024;
}
http {
##MIME type mappings, defined in the mime.types file
include mime.types;
##Default file type
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
##nginx uses the sendfile() call to send files. For normal applications this should be on; for disk-IO-heavy download applications it can be set to off
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
##Keep-alive connection timeout in seconds
keepalive_timeout 65;
#gzip on;
#load balancing
upstream balance {
#Round-robin by default; url hash distributes by URL, ip_hash by client IP, least_conn by fewest active connections
# ip_hash;
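#  hash $request_uri consistent;  # illustrative alternative: URL-based hashing (hash directive, nginx 1.7.2+)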
least_conn;
#The higher the weight, the larger the share of requests the server receives
#max_fails is the number of failed requests allowed; once exceeded, requests are forwarded to the next server and the backend is considered unavailable. Default 1; set to 0 to disable the check
#fail_timeout: if there is no response within this time the attempt counts as a failure, and nginx will not send requests to a server marked unavailable until the timeout expires
#down marks the server as not participating in load balancing
#backup: the server only receives requests when the non-backup servers are busy or unavailable
server 192.168.1.4 weight=1 max_fails=2 fail_timeout=30s;
server 192.168.1.20:8080 weight=1 max_fails=2 fail_timeout=30s;
server 127.0.0.1:8081 down;
#server 192.168.58.152:8080 backup;
}
##New site
server {
listen 80;
server_name test.com;
#Per-server access log; overrides the global setting for this server block
#access_log logs/test.com.log main;
#charset koi8-r;
#access_log logs/host.access.log main;
#Static resources
location ~ .*\.(js|css|htm|html|gif|jpg|jpeg|png|bmp|swf|ico|rar|zip|txt|flv|mid|doc|ppt|pdf|xls|mp3|wma)$ {
#Serve static resources directly from the nginx server (the directory can be customized)
root static;
}
#dynamic resource
location ~ \.(jsp|jspx|do|action)(\/.*)?$ {
#Forward dynamic requests to the Tomcat server; the matching rule can be customized
#Pass the client's real IP to the backend in a custom header
proxy_set_header real_ip $remote_addr; #The header name real_ip can then be read on the web application side (see the JSP demo below)
#My tomcat port is 80
#proxy_pass http://192.168.1.4;
proxy_pass http://balance; #Use the upstream load-balancing group defined above
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
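#Additional server on port 80 with no server_name; any request routed to this block simply gets an HTTP 500 response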
server {
listen 80;
return 500;
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
Example of load balancing configuration
#user nobody;
worker_processes 2;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
accept_mutex on; #Serialize accept() across workers to prevent the thundering herd problem; default is on
multi_accept on; #Whether a worker process accepts multiple new connections at once; default is off
worker_connections 1024;#Maximum connections per worker
}
http {
include mime.types;#File extension to MIME type mapping, mainly used for static resources served by nginx itself
default_type application/octet-stream;
#Log format
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;#Connection timeout
gzip on;
#Reverse proxy
#[configuration 1] this configuration is a combination of [configuration 4] and [configuration 5]
#This configuration forwards requests to two web servers, choosing the target host by client IP and distributing traffic according to the weights
upstream app1 {
ip_hash;
server 192.168.14.132:8080 weight=5;
server 192.168.14.133:80 weight=3;
}
#[configuration 2]
#Default load balancing configuration, nginx applies HTTP load balancing to distribute requests.
#upstream app1 {
# server 192.168.14.132:8080;
# server 192.168.14.133:80;
#}
#[configuration 3]
#Least-connections load balancing: nginx tries to avoid busy servers and sends new requests to servers with fewer active connections.
#upstream app1 {
# least_conn;
# server 192.168.14.132:8080;
# server 192.168.14.133:80;
#}
#[configuration 4]
#Session persistence configuration using ip_hash: the client's IP address is used as the hash key
#to determine which server in the group should handle the client's requests.
#This ensures that requests from the same client always go to the same server unless that server is unavailable.
#upstream app1 {
# ip_hash;
# server 192.168.14.132:8080;
# server 192.168.14.133:80;
#}
#[configuration 5]
#Weighted load balancing: server weights further influence the nginx load-balancing algorithm.
#When no weights are specified, all servers are treated as equally qualified for the chosen load-balancing method.
#upstream app1 {
# ip_hash;
# server 192.168.14.132:8080 weight=3;
# server 192.168.14.133:80 weight=2;
# server 192.168.14.134:80;
# server 192.168.14.135:80;
#}
server {#Multiple server blocks can be configured, listening on different IPs and ports
listen 80;#Listening port
server_name localhost;#Server name
#charset koi8-r;
#access_log logs/host.access.log main;
#The slash (/) matches all requests; this configuration hands every request to the upstream group named app1 for load balancing
location / {
proxy_pass http://app1;
}
#Image file paths. Static files are usually served from the local machine to speed up responses
#Multiple such location blocks can be configured as needed
location ~\.(gif|jpg|png)$ {
root /home/root/images;
}
location ~\.(iso|zip|txt|doc|docx)$ {
root /home/root/files;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# FastCGI is an extension of CGI; CGI stands for "Common Gateway Interface"
# I use Tomcat instead, so this configuration can be ignored.
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# Add a blacklist to prevent xxx from accessing specific files
# (stock nginx example: deny access to .htaccess files if Apache's document root concurs with nginx's)
#
#location ~ /\.ht {
# deny all;
#}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
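After editing the configuration, a quick way to validate and apply it (a sketch, assuming nginx is listening locally on port 80):
nginx -t                   # check the configuration for syntax errors
nginx -s reload            # apply the new configuration
curl -i http://localhost/  # the response should be served by one of the upstream backends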
Finally, here is a JSP demo that retrieves the IP addresses:
<%@ page language="java" contentType="text/html; charset=UTF-8" pageEncoding="UTF-8"%>
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Welcome to my demo.jsp</title>
</head>
<body>
Welcome to my demo.jsp<br/>
Client IP (from the real_ip header set by nginx): <%=request.getHeader("real_ip") %> <br/>
nginx server ip is: <%=request.getRemoteAddr()%>
</body>
</html>