haproxy + LNMT: Dynamic/Static Separation in Practice
I. Preface
The goals of this lab:
(1) Deploy WordPress on LNMP with dynamic/static separation; both dynamic and static content must be load balanced, and session handling must be taken care of;
(2) Add varnish between haproxy and the backend hosts for caching;
(3) haproxy configuration requirements:
(a) a stats page whose management interface can only be used from the local network;
(b) dynamic/static separation;
(c) compression of suitable content types;
(4) Finally, add a Tomcat server so that JSP dynamic requests are also handled through dynamic/static separation. (supplementary)
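For reference, these are the addresses and roles used throughout this article, inferred from the shell prompts and configuration files that follow:
- 192.168.0.81 - haproxy (scheduler; also serves the stats page on port 8080)
- 192.168.0.83 - dynamic (nginx + php-fpm, later also Tomcat)
- 192.168.0.84 - static (nginx + mariadb-server)
- 192.168.0.87 - varnish (cache in front of the static server, listening on port 6081)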
II. Building the LNMP environment
1. Configure nginx-dynamic
#Install the nginx and php-fpm services
[root@dynamic ~]# yum install -y epel-release
[root@dynamic ~]# yum install -y nginx php-fpm php-mysql php-mbstring php-mcrypt
#Create the nginx web root directory
[root@dynamic ~]# mkdir -pv /data/nginx/html
#Download WordPress into the web root and unpack it
[root@dynamic ~]# cd /data/nginx/html/
[root@dynamic html]# wget https://cn.wordpress.org/wordpress-4.9.4-zh_CN.tar.gz
[root@dynamic html]# tar xf wordpress-4.9.4-zh_CN.tar.gz
#Create a PHP test page
[root@dynamic html]# vim test.php
<html>
 <head>
  <title>PHP Test</title>
 </head>
 <body>
  <?php echo '<p>Hello World</p>'; ?>
 </body>
</html>
#Create the default html and php pages in the web root
[root@dynamic html]# vim index.html
<h1>This is dynamic</h1>
[root@dynamic html]# vim index.php
<h1>Dynamic</h1>
<?php
 phpinfo();
?>
#Edit the /etc/php-fpm.d/www.conf configuration file
[root@dynamic html]# vim /etc/php-fpm.d/www.conf
listen = 0.0.0.0:9000
user = apache
group = apache
pm = dynamic
pm.max_children = 50
pm.start_servers = 5
pm.min_spare_servers = 5
pm.max_spare_servers = 35
ping.path = /ping
ping.response = pong
pm.status_path = /status
slowlog = /var/log/php-fpm/www-slow.log
php_admin_value[error_log] = /var/log/php-fpm/www-error.log
php_admin_flag[log_errors] = on
php_value[session.save_handler] = files
php_value[session.save_path] = /var/lib/php/session
#Create the php-fpm session directory
[root@dynamic html]# mkdir /var/lib/php/session
[root@dynamic html]# chown apache /var/lib/php/session/
#Create the nginx server configuration
#Note: comment out the two default listen directives in /etc/nginx/nginx.conf
# listen 80 default_server;
# listen [::]:80 default_server;
[root@dynamic html]# vim /etc/nginx/conf.d/dynamic.conf
server {
    listen 80;
    server_name www.ilinux.io;
    root /data/nginx/html;
    index index.html index.php;
    location ~* \.php$ {
        fastcgi_pass 192.168.0.83:9000;
        fastcgi_index index.php;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME /data/nginx/html/$fastcgi_script_name;
    }
    location ~* ^/(ping|status)$ {
        fastcgi_pass 192.168.0.83:9000;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $fastcgi_script_name;
    }
}
#Start nginx and php-fpm, then adjust the firewalld and SELinux state
[root@dynamic html]# systemctl start php-fpm
[root@dynamic html]# systemctl start nginx
[root@dynamic html]# systemctl stop firewalld
[root@dynamic html]# systemctl disable firewalld
[root@dynamic html]# setenforce 0
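Since www.conf defines ping.path and pm.status_path, and the nginx vhost exposes them through the ^/(ping|status)$ location, the FastCGI chain can be sanity-checked right away; a minimal check, assuming the services above are running on 192.168.0.83:
#Should return "pong"
[root@dynamic html]# curl http://192.168.0.83/ping
#Should return the php-fpm pool status
[root@dynamic html]# curl http://192.168.0.83/status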
2. Configure nginx-static
#Install the nginx service and mariadb-server
[root@static ~]# yum install -y epel-release
[root@static ~]# yum install -y nginx mariadb-server
#Create the wordpress database
[root@static ~]# systemctl start mariadb
[root@static ~]# mysql
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 2
Server version: 5.5.56-MariaDB MariaDB Server
Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> create database wordpress;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> grant all on wordpress.* to 'wpuser'@'192.168.0.%' identified by "magedu";
Query OK, 0 rows affected (0.00 sec)
#Create the nginx web root directory
[root@static ~]# mkdir -pv /data/nginx/html
#Download WordPress into the web root and unpack it
[root@static ~]# cd /data/nginx/html/
[root@static html]# wget https://cn.wordpress.org/wordpress-4.9.4-zh_CN.tar.gz
[root@static html]# tar xf wordpress-4.9.4-zh_CN.tar.gz
#Create a text file and copy some images into the web root to serve as static content
[root@static html]# cp /usr/share/backgrounds/*.{png,jpg} .
[root@static html]# vim poem.txt
Quiet Night
I saw the moonlight before my couch,
And wondered if it were not the frost on the ground.
I raised my head and looked out on the mountain moon,
I bowed my head and thought of my far-off home.
by S. Obata
#Create the default html and php pages in the web root
[root@static html]# vim index.html
<h1>This is static</h1>
[root@static html]# vim index.php
<h1>Static</h1>
<?php
 phpinfo();
?>
#Create the nginx server configuration
[root@static html]# vim /etc/nginx/conf.d/static.conf
server {
    listen 80;
    server_name www.ilinux.io;
    root /data/nginx/html;
    index index.html index.php;
    location ~* \.php$ {
        fastcgi_pass 192.168.0.83:9000;
        fastcgi_index index.php;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME /data/nginx/html/$fastcgi_script_name;
    }
    location ~* ^/(ping|status)$ {
        fastcgi_pass 192.168.0.83:9000;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $fastcgi_script_name;
    }
}
#Start the nginx service and adjust the firewalld and SELinux state
[root@static html]# systemctl stop firewalld
[root@static html]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@static html]# setenforce 0
[root@static html]# systemctl start nginx
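For reference, a minimal wp-config.php sketch matching the database, user, and password created above; DB_HOST is an assumption here (the static server's address, 192.168.0.84, which is also the address used later as the varnish backend):
[root@static html]# cd wordpress/
[root@static wordpress]# cp wp-config-sample.php wp-config.php
[root@static wordpress]# vim wp-config.php
define('DB_NAME', 'wordpress');
define('DB_USER', 'wpuser');
define('DB_PASSWORD', 'magedu');
define('DB_HOST', '192.168.0.84');    //assumed address of the mariadb host (the static server)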
3. Configure varnish
#Install the varnish service
[root@static ~]# yum install -y epel-release
[root@static ~]# yum install -y varnish
#Configure varnish listen addresses and runtime parameters
[root@static ~]# vim /etc/varnish/varnish.params
RELOAD_VCL=1
VARNISH_VCL_CONF=/etc/varnish/default.vcl
VARNISH_LISTEN_PORT=6081
VARNISH_ADMIN_LISTEN_ADDRESS=192.168.0.87
VARNISH_ADMIN_LISTEN_PORT=6082
VARNISH_SECRET_FILE=/etc/varnish/secret
VARNISH_STORAGE="file,/data/cache/varnish_storage.bin,1G"
VARNISH_USER=varnish
VARNISH_GROUP=varnish
#Create the cache directory
[root@static ~]# mkdir -pv /data/cache
[root@static ~]# chown varnish /data/cache
#Edit the varnish VCL
[root@static ~]# vim /etc/varnish/default.vcl
vcl 4.0;
import directors;
probe static_healthcheck {
    .url = "/index.html";
    .window = 5;
    .threshold = 4;
    .interval = 2s;
    .timeout = 1s;
}
backend static {
    .host = "192.168.0.84";
    .port = "80";
    .probe = static_healthcheck;
}
sub vcl_init {
    new BE = directors.round_robin();
    BE.add_backend(static);
}
acl purgers {
    "127.0.0.1";
    "192.168.0.0/24";
}
sub vcl_recv {
    if (req.method == "GET" && req.http.cookie) {
        return(hash);
    }
    if (req.method == "PURGE") {
        if (client.ip ~ purgers) {
            return(purge);
        }
    }
    if (req.http.X-Forwarded-For) {
        set req.http.X-Forwarded-For = req.http.X-Forwarded-For + "," + client.ip;
    } else {
        set req.http.X-Forwarded-For = client.ip;
    }
    set req.backend_hint = BE.backend();
    return(hash);
}
sub vcl_backend_response {
    if (bereq.url ~ "\.(jpg|jpeg|gif|png)$") {
        set beresp.ttl = 1d;
    }
    if (bereq.url ~ "\.(html|css|js|txt)$") {
        set beresp.ttl = 12h;
    }
    if (beresp.http.Set-Cookie) {
        set beresp.grace = 30m;
        return(deliver);
    }
}
sub vcl_deliver {
    if (obj.hits > 0) {
        set resp.http.X-Cache = "HIT from " + server.ip;
    } else {
        set resp.http.X-Cache = "MISS";
    }
}
#Start varnish and adjust the firewalld and SELinux state
[root@static ~]# systemctl start varnish
[root@static ~]# systemctl stop firewalld
[root@static ~]# systemctl disable firewalld
[root@static ~]# setenforce 0
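Once varnish is up, the backend health, the cache, and the purgers ACL can be verified; a minimal sketch, assuming the varnish instance is reachable at 192.168.0.87:6081:
#Check that the static backend probe reports Healthy
[root@static ~]# varnishadm backend.list
#Fetch the same object twice: the second response should carry "X-Cache: HIT from ..."
[root@static ~]# curl -I http://192.168.0.87:6081/index.html
[root@static ~]# curl -I http://192.168.0.87:6081/index.html
#Purge the object from a host allowed by the purgers ACL
[root@static ~]# curl -X PURGE http://192.168.0.87:6081/index.html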
III. Installing and configuring HAProxy
With the backend LNMP environment in place, we can now configure HAProxy.
#Install the haproxy service
[root@haproxy ~]# yum install -y haproxy
#Configure HAProxy to log locally through rsyslog
[root@haproxy ~]# vim /etc/rsyslog.conf
$ModLoad imudp
$UDPServerRun 514
local2.* /var/log/haproxy.log
[root@haproxy ~]# vim /etc/sysconfig/rsyslog
SYSLOGD_OPTIONS="-r"
[root@haproxy ~]# systemctl restart rsyslog
#Edit the haproxy configuration file
[root@haproxy ~]# vim /etc/haproxy/haproxy.cfg
frontend main *:80
    acl url_static path_end -i .jpg .gif .png .css .js .txt
    acl url_dynamic path_end -i .php
    compression algo gzip    #set the compression algorithm to gzip
    compression type text/html text/plain image/x-png image/x-citrix-jpeg    #content types of the static content to compress
    use_backend static if url_static
    use_backend dynamic if url_dynamic
    default_backend websrvs
backend websrvs
    balance roundrobin
    server web1 192.168.0.83:80 check
    server web2 192.168.0.87:6081 check
backend static    #add varnish as the static backend; varnish proxies the static requests
    balance roundrobin
    server srvs1 192.168.0.87:6081 check
backend dynamic
    balance roundrobin
    server dyn1 192.168.0.83:80 check
listen stats
    bind *:8080
    stats enable
    stats uri /admin?stats
    acl url_stats src 192.168.0.0/24    #ACL matching the local network segment
    stats admin if url_stats    #allow only the local network matched by the ACL to use the stats management page
#Start the haproxy service
[root@haproxy ~]# systemctl start haproxy
[root@haproxy ~]# systemctl stop firewalld
[root@haproxy ~]# setenforce 0
[root@haproxy ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
Removed symlink /etc/systemd/system/basic.target.wants/firewalld.service.
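A quick sanity check of the configuration syntax and the listening sockets (using the configuration path edited above) could look like this:
#Validate the configuration file
[root@haproxy ~]# haproxy -c -f /etc/haproxy/haproxy.cfg
#Confirm haproxy is listening on :80 (frontend) and :8080 (stats)
[root@haproxy ~]# ss -tnlp | grep haproxy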
IV. Testing
- 1) Deploy WordPress on LNMP with dynamic/static separation; both dynamic and static content must be load balanced, and session handling must be taken care of.
At this point, requests for content ending in .php are scheduled by haproxy to the dynamic server, while requests for static content such as .jpg, .png, and .txt are scheduled to the static backend.
As the screenshot shows, the dynamic and static content of the WordPress page is now handled separately: static content is proxied to varnish, while dynamic content is proxied to the dynamic server.
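This can also be confirmed from the command line by inspecting the response headers, assuming the haproxy frontend address 192.168.0.81 used below: static objects pass through varnish and therefore carry the X-Cache header set in vcl_deliver, while .php responses are answered by the dynamic backend and carry no such header.
#Static content: routed to the static backend (varnish), so an X-Cache: MISS/HIT header is present
[root@client ~]# curl -I http://192.168.0.81/poem.txt
#Dynamic content: routed to the dynamic backend, no X-Cache header expected
[root@client ~]# curl -I http://192.168.0.81/index.php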
Requests to http://192.168.0.81 are, by default, round-robined across the two backend servers, as shown below:
[root@client ~]# for i in {1..10} ; do curl http://192.168.0.81 ; done
<h1>This is dynamic</h1>
<h1>This is static</h1>
<h1>This is dynamic</h1>
<h1>This is static</h1>
<h1>This is dynamic</h1>
<h1>This is static</h1>
<h1>This is dynamic</h1>
<h1>This is static</h1>
<h1>This is dynamic</h1>
<h1>This is static</h1>
Sometimes, however, we need to make sure that a given user is always served by the same server, and for that we need session persistence. haproxy provides its own persistence mechanism: by adding cookie-based persistence to the haproxy configuration, every request from the same user is sent to the same server, as shown below:
backend websrvs
    balance roundrobin
    cookie WEBSRV insert nocache indirect
    server web1 192.168.0.83:80 check cookie web1
    server web2 192.168.0.87:6081 check cookie web2
After restarting haproxy, every web request from a given user is scheduled to the same backend server.
The mechanism is as follows: haproxy uses a cookie to tell the client which backend server handled its first request. All subsequent requests from that client carry this cookie, and haproxy reads it to decide which backend server the connection should be scheduled to.
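A minimal check from the client, again assuming the frontend at 192.168.0.81: the first response should contain a Set-Cookie: WEBSRV=web1 (or web2) header, and replaying that cookie should pin every further request to the corresponding server.
#First request: note the Set-Cookie: WEBSRV=... header inserted by haproxy
[root@client ~]# curl -I http://192.168.0.81/
#Replay the cookie: every response now comes from the same backend (here web1, the dynamic server)
[root@client ~]# for i in {1..5} ; do curl -b "WEBSRV=web1" http://192.168.0.81/ ; done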
- 2) Add varnish between haproxy and the backend hosts for caching
The earlier screenshots already show the static content being answered by the varnish cache with a "HIT", which means the cache is in effect.
- 3) Compress suitable content types, and allow the stats page management interface to be used only from the local network.
Because compression of the relevant static content types is configured in haproxy, a response for such content that carries the corresponding compression header shows that compression has succeeded, for example:
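A minimal check from the client, assuming the frontend at 192.168.0.81; the client has to advertise gzip support, otherwise haproxy will not compress:
#For a compressible type such as text/plain, the response headers should include "Content-Encoding: gzip"
[root@client ~]# curl -s -H "Accept-Encoding: gzip" -D - -o /dev/null http://192.168.0.81/poem.txt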
When the stats page is visited from a host in the local network, the management operations shown in the red box of the screenshot below are available. From outside the specified local network segment, the stats can only be viewed and no management operations are possible.
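The stats can also be pulled from the command line in CSV form by appending ";csv" to the stats URI (assuming the stats port 8080 configured above):
[root@client ~]# curl "http://192.168.0.81:8080/admin?stats;csv"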
V. Dynamic/static separation for JSP content (supplementary)
Deploy a Tomcat service on the dynamic server. haproxy load-balances JSP-related dynamic content to the IP and port that the nginx service on the dynamic server listens on, and that nginx instance then schedules the JSP content to Tomcat for processing.
1. Install Tomcat on the dynamic server
#Download the JDK tarball in advance and place it under /usr/local/src
#Install the JDK from the tarball
[root@dynamic ~]# cd /usr/local/src/
[root@dynamic src]# tar xf jdk-10.0.1_linux-x64_bin.tar.gz
[root@dynamic src]# ln -sv /usr/local/src/jdk-10.0.1 /usr/local/jdk
‘/usr/local/jdk’ -> ‘/usr/local/src/jdk-10.0.1’
[root@dynamic src]# vim /etc/profile.d/jdk.sh
export JAVA_HOME=/usr/local/jdk
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH
export CLASSPATH=.:$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$JAVA_HOME/lib/tools.jar
[root@dynamic src]# source /etc/profile.d/jdk.sh
[root@dynamic src]# java -version
java version "10.0.1" 2018-04-17
Java(TM) SE Runtime Environment 18.3 (build 10.0.1+10)
Java HotSpot(TM) 64-Bit Server VM 18.3 (build 10.0.1+10, mixed mode)
#Install the tomcat service via yum
[root@dynamic src]# yum install -y tomcat tomcat-webapps tomcat-admin-webapps tomcat-docs-webapp tomcat-lib
#Enable the tomcat management pages
[root@dynamic src]# vim /etc/tomcat/tomcat-users.xml
<role rolename="admin-gui"/>
<role rolename="manager-gui"/>
<user username="admin" password="magedu" roles="admin-gui,manager-gui"/>
#Manually add a JSP test application
[root@dynamic src]# mkdir -pv /usr/local/tomcat/webapps/javatest/{classes,lib,WEB-INF}
mkdir: created directory ‘/usr/local/tomcat/webapps/javatest’
mkdir: created directory ‘/usr/local/tomcat/webapps/javatest/classes’
mkdir: created directory ‘/usr/local/tomcat/webapps/javatest/lib’
mkdir: created directory ‘/usr/local/tomcat/webapps/javatest/WEB-INF’
[root@dynamic src]# vim /usr/local/tomcat/webapps/javatest/index.jsp
<%@ page language="java" %>
<html>
<head><title>TomcatA</title></head>
<body>
<h1><font color="red">TomcatA.magedu.com</font></h1>
<table align="center" border="1">
<tr>
<td>Session ID</td>
<% session.setAttribute("magedu.com","magedu.com"); %>
<td><%= session.getId() %></td>
</tr>
<tr>
<td>Created on</td>
<td><%= session.getCreationTime() %></td>
</tr>
</table>
</body>
</html>
#Edit tomcat's server.xml file
[root@dynamic src]# vim /etc/tomcat/server.xml
<Host name="192.168.0.83" appBase="/usr/local/tomcat/webapps/"    #appBase points at the directory holding the javatest application
unpackWARs="true" autoDeploy="true">
#Start the tomcat service
[root@dynamic src]# systemctl start tomcat
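Before putting nginx in front, it is worth confirming that Tomcat serves the test page directly on its own connector port (8080, Tomcat's default HTTP port), using the javatest application created above:
[root@dynamic src]# curl http://192.168.0.83:8080/javatest/index.jsp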
#Configure nginx to proxy JSP dynamic content to the port Tomcat listens on
[root@dynamic src]# vim /etc/nginx/conf.d/dynamic.conf
server {
    listen 80;
    server_name www.ilinux.io;
    root /data/nginx/html;
    index index.html index.php;
    location ~* \.php$ {
        fastcgi_pass 192.168.0.83:9000;
        fastcgi_index index.php;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME /data/nginx/html/$fastcgi_script_name;
    }
    location ~* ^/(ping|status)$ {
        fastcgi_pass 192.168.0.83:9000;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $fastcgi_script_name;
    }
    location ~* /javatest\.* {    #proxy the URI path of the Java dynamic content to tomcat
        proxy_pass http://192.168.0.83:8080;
    }
    location ~* \.(jsp|do)$ {    #proxy dynamic content ending in .jsp or .do to tomcat
        proxy_pass http://192.168.0.83:8080;
    }
}
#Reload the nginx service
[root@dynamic src]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@dynamic src]# nginx -s reload
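With nginx reloaded, a quick check that it forwards JSP requests to Tomcat (through the vhost above on 192.168.0.83:80):
[root@dynamic src]# curl http://192.168.0.83/javatest/index.jsp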
2. Update HAProxy to load-balance and proxy the JSP dynamic content
#Modify the frontend section of the haproxy configuration file
[root@haproxy ~]# vim /etc/haproxy/haproxy.cfg
frontend main *:80
    acl url_static path_end -i .jpg .gif .png .css .js .txt
    acl url_dynamic path_end -i .php .jsp .do    #add the .jsp and .do suffixes to the url_dynamic ACL
    acl url_java path_beg -i /javatest    #add the path of the tomcat dynamic content
    compression algo gzip
    compression type text/css text/html text/plain image/x-png image/x-citrix-jpeg
    use_backend static if url_static
    use_backend dynamic if url_dynamic
    use_backend dynamic if url_java
    default_backend websrvs
#Restart the haproxy service
[root@haproxy ~]# systemctl restart haproxy
The screenshots above show that Tomcat's JSP dynamic content is correctly scheduled by haproxy to the Tomcat server, while the corresponding static content can still be accessed normally.
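The same can be verified from the client, assuming the haproxy frontend at 192.168.0.81:
#JSP requests are routed haproxy -> backend dynamic (nginx) -> tomcat
[root@client ~]# curl http://192.168.0.81/javatest/index.jsp
#Static content is still served through the static backend (varnish)
[root@client ~]# curl -I http://192.168.0.81/poem.txt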