
Deploying a Highly Available Kubernetes (v1.19.0) Cluster from Binaries on CentOS 7


I. Planning

1. master/etcd cluster nodes (3):

192.168.21.30(master)
192.168.21.31(node1)
192.168.21.32(node2)

2. Worker (node) hosts (3):

192.168.21.31(node1)
192.168.21.32(node2)
192.168.21.33(node3)

3. haproxy + keepalived high-availability nodes (2):

192.168.21.30(master)
192.168.21.31(node1)

4. Harbor registry node (1):

192.168.21.34(node4)

II. Deploy the etcd high-availability cluster

1. Create the CA certificate used to secure connections for etcd and the Kubernetes cluster

Use openssl to issue a self-signed certificate and place it under /etc/kubernetes/pki:

openssl genrsa -out ca.key 2048

openssl req -x509 -new -nodes -key ca.key -subj "/CN=192.168.21.30" -days 36500 -out ca.crt

[root@master pki]# pwd
/etc/kubernetes/pki
[root@master pki]# ls
ca.crt  ca.key

2. Create the etcd certificates

  • Create the certificate configuration file etcd_ssl.cnf (the certificates will be signed with the CA root ca.key/ca.crt created above)
vim etcd_ssl.cnf

[ req ]
req_extensions = v3_req
distinguished_name = req_distinguished_name

[ req_distinguished_name ]

[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names

[ alt_names ]
IP.1 = 192.168.21.30
IP.2 = 192.168.21.31
IP.3 = 192.168.21.32
  • Create the etcd server certificate
    Use openssl to create the etcd server certificate, consisting of etcd_server.key and etcd_server.crt, stored under /etc/etcd/pki:
[root@master pki]# openssl genrsa -out etcd_server.key 2048
Generating RSA private key, 2048 bit long modulus
.....................................................................................
.................+++.............+++
e is 65537 (0x10001)

[root@master pki]# openssl req -new -key etcd_server.key -config etcd_ssl.cnf -subj "/CN=etcd-server" -out etcd_server.csr

[root@master pki]# openssl x509 -req -in etcd_server.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -days 36500 -extensions v3_req -extfile etcd_ssl.cnf -out etcd_server.crt
Signature ok
subject=/CN=192.168.21.30
Getting CA Private Key
  • Create the etcd client certificate
    Use openssl to create the etcd client certificate, consisting of etcd_client.key and etcd_client.crt, stored under /etc/etcd/pki:
[root@master pki]# openssl genrsa -out etcd_client.key 2048
Generating RSA private key, 2048 bit long modulus
.............................................+++
..............................................................................+++
e is 65537 (0x10001)
[root@master pki]# openssl req -new -key etcd_client.key -config etcd_ssl.cnf -subj "/CN=etcd-client" -out etcd_client.csr
[root@master pki]# openssl x509 -req -in etcd_client.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -days 36500 -extensions v3_req \
-extfile etcd_ssl.cnf -out etcd_client.crt
Signature ok
subject=/CN=etcd-client
Getting CA Private Key

For the other two etcd nodes, simply copy the certificates from the first node.
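A minimal sketch of that copy step, assuming /etc/kubernetes/pki and /etc/etcd/pki already exist on node1 and node2:

[root@master ~]# for host in 192.168.21.31 192.168.21.32; do
    scp /etc/kubernetes/pki/ca.crt /etc/kubernetes/pki/ca.key $host:/etc/kubernetes/pki/
    scp /etc/etcd/pki/etcd_server.crt /etc/etcd/pki/etcd_server.key \
        /etc/etcd/pki/etcd_client.crt /etc/etcd/pki/etcd_client.key $host:/etc/etcd/pki/
done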

3. Configure etcd

Edit /etc/etcd/etcd.conf, which uses environment variables.
The example below is for one node; on the other nodes, change the IP addresses and ETCD_NAME accordingly (see the sed sketch after the config).

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.21.30:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.21.30:2379"
ETCD_NAME="etcd1"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.21.30:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.21.30:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.21.30:2380,etcd2=https://192.168.21.31:23
80,etcd3=https://192.168.21.32:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_CERT_FILE="/etc/etcd/pki/etcd_server.crt"
ETCD_KEY_FILE="/etc/etcd/pki/etcd_server.key"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_TRUSTED_CA_FILE="/etc/kubernetes/pki/ca.crt"
ETCD_PEER_CERT_FILE="/etc/etcd/pki/etcd_server.crt"
ETCD_PEER_KEY_FILE="/etc/etcd/pki/etcd_server.key"
ETCD_PEER_TRUSTED_CA_FILE="/etc/kubernetes/pki/ca.crt"
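On node1 and node2 only ETCD_NAME and the node's own IPs in the listen/advertise URLs change. A quick sketch for 192.168.21.31, assuming the file above has already been copied there (the trailing quote in the patterns keeps ETCD_INITIAL_CLUSTER untouched; double-check the result before starting etcd):

[root@node1 ~]# sed -i \
    -e 's/192.168.21.30:2380"/192.168.21.31:2380"/' \
    -e 's/192.168.21.30:2379"/192.168.21.31:2379"/' \
    -e 's/ETCD_NAME="etcd1"/ETCD_NAME="etcd2"/' \
    /etc/etcd/etcd.conf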

Start etcd and enable it at boot:

systemctl restart etcd && systemctl enable etcd

Verify that the etcd cluster is healthy:

etcdctl --ca-file=/etc/kubernetes/pki/ca.crt --cert-file=/etc/etcd/pki/etcd_client.crt \
--key-file=/etc/etcd/pki/etcd_client.key \
--endpoints=https://192.168.21.30:2379,https://192.168.21.31:2379,https://192.168.21.32:2379 cluster-health
member a5753ed960575bb4 is healthy: got healthy result from https://192.168.21.31:2379
member ca2a47d444bac4dd is healthy: got healthy result from https://192.168.21.30:2379
member d85cddbd7165b028 is healthy: got healthy result from https://192.168.21.32:2379
cluster is healthy
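The same client certificate can also be used to list the cluster members; a sketch using the v2 etcdctl shipped with the CentOS etcd package:

etcdctl --ca-file=/etc/kubernetes/pki/ca.crt --cert-file=/etc/etcd/pki/etcd_client.crt \
--key-file=/etc/etcd/pki/etcd_client.key \
--endpoints=https://192.168.21.30:2379,https://192.168.21.31:2379,https://192.168.21.32:2379 member list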

III. Deploy the highly available Kubernetes master components (v1.19.0)

1. Download the server component binaries and copy the executables to /usr/bin

Download: https://dl.k8s.io/v1.19.0/kubernetes-server-linux-amd64.tar.gz
Extract kubernetes-server-linux-amd64.tar.gz and copy the executables under its bin directory to /usr/bin/:

[root@master k8s-1.19.0]# ls
kubernetes-client-linux-amd64.tar.gz  kubernetes-server
kubernetes-node-linux-amd64.tar.gz    kubernetes-server-linux-amd64.tar.gz

[root@master bin]# pwd
/root/k8s/k8s-1.19.0/kubernetes-server/server/bin
[root@master bin]# ls
1.txt                               kubectl
apiextensions-apiserver             kubelet
kubeadm                             kube-proxy
kube-aggregator                     kube-proxy.docker_tag
kube-apiserver                      kube-proxy.tar
kube-apiserver.docker_tag           kube-scheduler
kube-apiserver.tar                  kube-scheduler.docker_tag
kube-controller-manager             kube-scheduler.tar
kube-controller-manager.docker_tag  mounter
kube-controller-manager.tar
[root@master bin]# find . -perm 755 -exec cp {} /usr/bin/ \;
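A quick sanity check that the copied binaries are the expected release (a sketch; both should report v1.19.0):

[root@master bin]# kube-apiserver --version
[root@master bin]# kubectl version --client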

2. Deploy the kube-apiserver service

  • Create the kube-apiserver server certificate
[root@master pki]# pwd
/etc/kubernetes/pki
[root@master pki]# vim master_ssl.cnf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name

[req_distinguished_name]

[v3_req]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names

[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
DNS.5 = master
DNS.6 = node1
DNS.7 = node2
DNS.8 = node3
DNS.9 = node4
IP.1 = 172.16.0.100
IP.2 = 192.168.21.30
IP.3 = 192.168.21.31
IP.4 = 192.168.21.32
IP.5 = 192.168.21.33
IP.6 = 192.168.21.34
IP.7 = 192.168.21.35

[root@master pki]# openssl genrsa -out apiserver.key 2048
Generating RSA private key, 2048 bit long modulus
...................................+++
...............................+++
e is 65537 (0x10001)

[root@master pki]# openssl req -new -key apiserver.key -config master_ssl.cnf -subj "/CN=192.168.21.30" -out apiserver.csr
[root@master pki]# openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 36500 -extensions v3_req -extfile master_ssl.cnf -out apiserver.crt
Signature ok
subject=/CN=192.168.21.30
Getting CA Private Key
  • Create the systemd unit
[root@master pki]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/apiserver.conf
ExecStart=/usr/bin/kube-apiserver $KUBE_API_ARGS
Restart=always

[Install]
WantedBy=multi-user.target

Create the configuration file /etc/kubernetes/apiserver.conf:

[root@master pki]# vim /etc/kubernetes/apiserver.conf
KUBE_API_ARGS="--insecure-port=0 \
--secure-port=6443 \
--tls-cert-file=/etc/kubernetes/pki/apiserver.crt \
--tls-private-key-file=/etc/kubernetes/pki/apiserver.key \
--client-ca-file=/etc/kubernetes/pki/ca.crt \
--apiserver-count=3 --endpoint-reconciler-type=master-count \
--etcd-servers=https://192.168.21.30:2379,https://192.168.21.31:2379,https://192.168.21.32:2379 \
--etcd-cafile=/etc/kubernetes/pki/ca.crt \
--etcd-certfile=/etc/etcd/pki/etcd_client.crt \
--etcd-keyfile=/etc/etcd/pki/etcd_client.key \
--service-cluster-ip-range=169.169.0.0/16 \
--service-node-port-range=30000-32767 \
--allow-privileged=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes --v=0"
  • Start kube-apiserver and enable it at boot
systemctl start kube-apiserver.service && systemctl enable kube-apiserver.service
[root@master k8s]# netstat -an |grep 6443
tcp6       0      0 :::6443                 :::*                    LISTEN
tcp6       0      0 ::1:52844               ::1:6443                ESTABLISHED
tcp6       0      0 ::1:6443                ::1:52844               ESTABLISHED
  • Create the client certificate
[root@master pki]# openssl genrsa -out client.key 2048
Generating RSA private key, 2048 bit long modulus
....+++.........................................+++
e is 65537 (0x10001)
[root@master pki]# openssl req -new -key client.key -subj "/CN=admin" -out client.csr
[root@master pki]# openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client.crt -days 36500
Signature ok
subject=/CN=admin
Getting CA Private Key
  • Create the kubeconfig file that clients use to connect to kube-apiserver
[root@master kubernetes]# pwd
/etc/kubernetes
[root@master kubernetes]# vim kubeconfig
apiVersion: v1
kind: Config
clusters:
- name: default
  cluster:
    server: https://192.168.21.35:9443
    certificate-authority: /etc/kubernetes/pki/ca.crt
users:
- name: admin
  user:
    client-certificate: /etc/kubernetes/pki/client.crt
    client-key: /etc/kubernetes/pki/client.key
contexts:
- context:
    cluster: default
    user: admin
  name: default
current-context: default
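The kubelet and kube-proxy configurations in section VI reference this same kubeconfig and the client/CA certificates, so they also need to be present on every node host. A minimal sketch, assuming /etc/kubernetes/pki already exists on the nodes:

[root@master kubernetes]# for host in 192.168.21.31 192.168.21.32 192.168.21.33; do
    scp /etc/kubernetes/kubeconfig $host:/etc/kubernetes/
    scp /etc/kubernetes/pki/ca.crt /etc/kubernetes/pki/client.crt \
        /etc/kubernetes/pki/client.key $host:/etc/kubernetes/pki/
done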

3. Deploy the kube-controller-manager service

  • Create the kube-controller-manager systemd unit
[root@master kubernetes]# cat /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/controller-manager.conf
ExecStart=/usr/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_ARGS
Restart=always

[Install]
WantedBy=multi-user.target
  • Create the controller-manager.conf configuration file
[root@master kubernetes]# cat controller-manager.conf
KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig \
--leader-elect=true \
--service-cluster-ip-range=169.169.0.0/16 \
--service-account-private-key-file=/etc/kubernetes/pki/apiserver.key \
--root-ca-file=/etc/kubernetes/pki/ca.crt \
--log-dir=/var/log/kubernetes --logtostderr=false --v=0"
  • Start kube-controller-manager and enable it at boot
[root@master kubernetes]# systemctl start kube-controller-manager.service && systemctl enable kube-controller-manager.service
[root@master kubernetes]# ps aux |grep kube-controller
root     16451  6.1  1.8 810028 72476 ?        Ssl  09:50   0:01 /usr/bin/kube-controller-manager --kubeconfig=/etc/kubernetes/kubeconfig --leader-elect=true --service-cluster-ip-range=169.169.0.0/16 --service-account-private-key-file=/etc/kubernetes/pki/apiserver.key --root-ca-file=/etc/kubernetes/pki/ca.crt --log-dir=/var/log/kubernetes --logtostderr=false --v=0

4. Deploy the kube-scheduler service

  • Create the kube-scheduler systemd unit
[root@master kubernetes]# vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/scheduler.conf
ExecStart=/usr/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
Restart=always

[Install]
WantedBy=multi-user.target
  • Create the scheduler.conf configuration file
[root@master kubernetes]# vim scheduler.conf
[root@master kubernetes]# cat scheduler.conf
KUBE_SCHEDULER_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig \
--leader-elect=true \
--logtostderr=false --log-dir=/var/log/kubernetes --v=0"
  • Start kube-scheduler.service and enable it at boot
[root@master kubernetes]# systemctl start kube-scheduler.service && systemctl enable kube-scheduler.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@master kubernetes]# ps aux |grep kube-sch
root     17369 10.1  1.1 746396 44876 ?        Ssl  10:04   0:01 /usr/bin/kube-scheduler --kubeconfig=/etc/kubernetes/kubeconfig --leader-elect=true --logtostderr=false --log-dir=/var/log/kubernetes --v=0
root     17407  0.0  0.0 112820  2256 pts/0    R+   10:04   0:00 grep --color=auto kube-sch

5. Deploy a highly available load balancer with haproxy and keepalived

To avoid a single point of failure, two hosts form the HA pair; this example uses 192.168.21.30 and 192.168.21.31. VIP: 192.168.21.35.

  • Install haproxy and keepalived
[root@master kubernetes]# yum install haproxy
[root@master kubernetes]# yum install keepalived
  • Configure haproxy
[root@master haproxy]# cat haproxy.cfg |grep -Ev "^#" |grep -Ev "^*#"
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4096
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

frontend  kube-apiserver
    mode                 tcp
    bind                 *:9443
    option               tcplog
    default_backend      kube-apiserver

listen stats
    mode           http
    bind           *:8888
    stats auth     admin:password
    stats refresh  5s
    stats realm    HAProxy\ Statistics
    stats uri      /stats
    log            127.0.0.1 local3 err

backend kube-apiserver
    mode        tcp
    balance     roundrobin
    server  master 192.168.21.30:6443 check
    server  node1 192.168.21.31:6443 check
    server  node2 192.168.21.32:6443 check
  • Start haproxy and enable it at boot
[root@master ~]# systemctl start haproxy.service && systemctl enable haproxy.service
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.

Verify haproxy:
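A quick check is to confirm that the 9443 frontend and the 8888 stats port are listening and that the stats page answers (admin:password comes from the stats auth line in haproxy.cfg above); a minimal sketch:

[root@master ~]# ss -lntp | grep -E ':(9443|8888)'
[root@master ~]# curl -u admin:password http://192.168.21.30:8888/stats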

  • Configure keepalived
    First node:
[root@master keepalived]# cat keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id LVS_1
}

vrrp_script checkhaproxy {
  script "/usr/bin/check-haproxy.sh"
  interval 2
  weight -30
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass password
    }
    virtual_ipaddress {
        192.168.21.35/24 dev eth0
    }
    track_script {
      checkhaproxy
    }
}

[root@master keepalived]# cat /usr/bin/check-haproxy.sh
#!/bin/bash
count=`netstat -apn | grep 9443 | wc -l`
if [ $count -gt 0 ]; then
  exit 0
else
  exit 1
fi

Second node:

[root@node1 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id LVS_2
}

vrrp_script checkhaproxy {
  script "/usr/bin/check-haproxy.sh"
  interval 2
  weight -30
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass password
    }
    virtual_ipaddress {
        192.168.21.35/24 dev eth0
    }
    track_script {
      checkhaproxy
    }
}
  • Start keepalived and enable it at boot
[root@master keepalived]# systemctl start keepalived.service && systemctl enable keepalived.service
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@master keepalived]# ps aux |grep keep
root     22629  0.0  0.0 123008  2108 ?        Ss   11:32   0:00 /usr/sbin/keepalived -D
root     22630  0.0  0.1 123008  5708 ?        S    11:32   0:00 /usr/sbin/keepalived -D
root     22631  0.0  0.1 125132  5708 ?        S    11:32   0:00 /usr/sbin/keepalived -D
root     22667  0.0  0.0 112820  2212 pts/0    S+   11:32   0:00 grep --color=auto keep
  • Verify keepalived
[root@master keepalived]# curl -v -k https://192.168.21.35:9443
* About to connect() to 192.168.21.35 port 9443 (#0)
*   Trying 192.168.21.35...
* Connected to 192.168.21.35 (192.168.21.35) port 9443 (#0)
* Initializing NSS with certpath: sql:/etc/pki/nssdb
* skipping SSL peer certificate verification
* NSS: client certificate not found (nickname not specified)
* SSL connection using TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
* Server certificate:
* 	subject: CN=192.168.21.30
* 	start date: Jul 20 07:05:48 2021 GMT
* 	expire date: Jun 26 07:05:48 2121 GMT
* 	common name: 192.168.21.30
* 	issuer: CN=192.168.21.30
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 192.168.21.35:9443
> Accept: */*
> 
< HTTP/1.1 401 Unauthorized
< Cache-Control: no-cache, private
< Content-Type: application/json
< Date: Wed, 21 Jul 2021 03:36:48 GMT
< Content-Length: 165
< 
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {
  },
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
* Connection #0 to host 192.168.21.35 left intact
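The 401 above simply means curl presented no client certificate; reaching it already proves the VIP forwards to a kube-apiserver. For an authenticated check through the VIP, a sketch using the client certificate created earlier:

curl --cacert /etc/kubernetes/pki/ca.crt \
     --cert /etc/kubernetes/pki/client.crt \
     --key /etc/kubernetes/pki/client.key \
     https://192.168.21.35:9443/version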

IV. Deploy the flannel network component (flanneld)

Install flanneld on all node hosts.

1. Download flannel

Download: https://github.com/flannel-io/flannel/releases

flannel-v0.14.0-linux-amd64.tar.gz

After extracting, copy flanneld and mk-docker-opts.sh to /usr/bin (on all node hosts):

[root@node1 flannel]# pwd
/root/k8s/flannel
[root@node1 flannel]# ls
flanneld  mk-docker-opts.sh  README.md
[root@node1 flannel]# cp flanneld mk-docker-opts.sh /usr/bin/

2. Create the flanneld systemd unit

[root@node1 flannel]# cat /usr/lib/systemd/system/flanneld.service
[Unit]
Description=Kubernetes Network Plugin Flannel
Documentation=https://flannel
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/etc/sysconfig/flanneld.conf
ExecStart=/usr/bin/flanneld --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/usr/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target

3. Create the flanneld.conf configuration file

[root@node1 flannel]# cat /etc/sysconfig/flanneld.conf
ETCD_ENDPOINTS="https://192.168.21.30:2379,https://192.168.21.31:2379,https://192.168.21.32:2379"
FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
--etcd-cafile=/etc/kubernetes/pki/ca.crt \
--etcd-certfile=/etc/kubernetes/pki/etcd_server.crt \
--etcd-keyfile=/etc/kubernetes/pki/etcd_server.key \
--etcd-prefix=/coreos.com/network \
--iface=eth0"

4. Create the network entry in etcd (run on a master node)

etcdctl --endpoints https://192.168.21.30:2379,https://192.168.21.31:2379,https://192.168.21.32:2379 --ca-file /etc/kubernetes/pki/ca.crt --cert-file /etc/kubernetes/pki/etcd_server.crt --key-file /etc/kubernetes/pki/etcd_server.key set /coreos.com/network/config '{"Network":"172.16.0.0/16","Backend":{"Type":"vxlan"}}'
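The entry can be read back to confirm it was written; a sketch with the same flags:

etcdctl --endpoints https://192.168.21.30:2379,https://192.168.21.31:2379,https://192.168.21.32:2379 \
--ca-file /etc/kubernetes/pki/ca.crt --cert-file /etc/kubernetes/pki/etcd_server.crt \
--key-file /etc/kubernetes/pki/etcd_server.key get /coreos.com/network/config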

5. Start the flanneld service and enable it at boot

[root@node1 flannel]# systemctl start flanneld.service && systemctl enable flanneld.service

6. Verify

# ifconfig
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.16.62.0  netmask 255.255.255.255  broadcast 172.16.62.0
        inet6 fe80::5898:9aff:fe32:56ab  prefixlen 64  scopeid 0x20<link>
        ether 5a:98:9a:32:56:ab  txqueuelen 0  (Ethernet)
        RX packets 3  bytes 252 (252.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 3  bytes 252 (252.0 B)
        TX errors 0  dropped 5 overruns 0  carrier 0  collisions 0

V. Deploy the Docker service

Install Docker on all node hosts; this example installs it with yum.

1. docker-ce.repo

[root@node1 flannel]# cat /etc/yum.repos.d/docker-ce.repo
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg

[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/debug-$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg

[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/source/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg

[docker-ce-test]
name=Docker CE Test - $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg

[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/debug-$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg

[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/source/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg

[docker-ce-nightly]
name=Docker CE Nightly - $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg

[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/debug-$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg

[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg

2. Install

yum install docker-ce

3. Configure docker.service

[root@node1 flannel]# cat /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity

# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes

# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500

[Install]
WantedBy=multi-user.target

4. Start Docker and enable it at boot

systemctl start docker && systemctl enable docker

5. Check the Docker network

[root@node1 flannel]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 172.16.62.1  netmask 255.255.255.0  broadcast 172.16.62.255
        inet6 fe80::42:e0ff:fe18:9fa  prefixlen 64  scopeid 0x20<link>
        ether 02:42:e0:18:09:fa  txqueuelen 0  (Ethernet)
        RX packets 5  bytes 308 (308.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 10  bytes 904 (904.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.21.31  netmask 255.255.255.0  broadcast 192.168.21.255
        inet6 fe80::8d62:e14a:b27d:d478  prefixlen 64  scopeid 0x20<link>
        inet6 fe80::95d3:ac4d:e02d:e037  prefixlen 64  scopeid 0x20<link>
        inet6 fe80::3ce7:8033:b538:bb4e  prefixlen 64  scopeid 0x20<link>
        ether 3a:66:dd:a0:4b:f2  txqueuelen 1000  (Ethernet)
        RX packets 1984756  bytes 277698227 (264.8 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 1962569  bytes 272656611 (260.0 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.16.62.0  netmask 255.255.255.255  broadcast 172.16.62.0
        inet6 fe80::5898:9aff:fe32:56ab  prefixlen 64  scopeid 0x20<link>
        ether 5a:98:9a:32:56:ab  txqueuelen 0  (Ethernet)
        RX packets 3  bytes 252 (252.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 3  bytes 252 (252.0 B)
        TX errors 0  dropped 5 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 534202  bytes 108091817 (103.0 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 534202  bytes 108091817 (103.0 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

6. Configure a Docker registry mirror (China mirror acceleration)

[root@node1 ~]# cat /etc/docker/daemon.json
{
	"insecure-registries": ["192.168.21.34"],
	"registry-mirrors": ["https://s7s5jkzp.mirror.aliyuncs.com"]
}
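Docker only reads daemon.json at startup, so restart it after editing the file and confirm the mirror is active; a quick sketch:

[root@node1 ~]# systemctl restart docker
[root@node1 ~]# docker info | grep -A1 -i "registry mirrors"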

7. Private Docker registry: Harbor

Official site: https://goharbor.io/. This example runs Harbor on the 192.168.21.34 host (see the install sketch below).
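Installing Harbor itself is not covered in this article; as a rough sketch only (assuming Docker and docker-compose are already installed on 192.168.21.34, and using an example release version), the offline installer flow looks like this:

wget https://github.com/goharbor/harbor/releases/download/v2.2.3/harbor-offline-installer-v2.2.3.tgz
tar -xzf harbor-offline-installer-v2.2.3.tgz && cd harbor
cp harbor.yml.tmpl harbor.yml
# set "hostname: 192.168.21.34"; for plain HTTP (matching the insecure-registries entry above), comment out the https block
./install.sh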

VI. Deploy the node services

1. Download the node component binaries and copy the executables to /usr/bin on all node hosts

Download: https://dl.k8s.io/v1.19.0/kubernetes-node-linux-amd64.tar.gz
Extract kubernetes-node-linux-amd64.tar.gz and copy the executables under its bin directory to /usr/bin/ (a sketch follows).
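A sketch of the extraction and copy step, mirroring what was done for the server package (assuming the tarball is in the current directory):

tar -xzf kubernetes-node-linux-amd64.tar.gz
cd kubernetes/node/bin
cp kubelet kube-proxy kubectl /usr/bin/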

2. Deploy the kubelet service

  • Create the kubelet systemd unit
[root@node1 pki]# cat /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service

[Service]
EnvironmentFile=/etc/kubernetes/kubelet.conf
ExecStart=/usr/bin/kubelet $KUBELET_ARGS
Restart=always

[Install]
WantedBy=multi-user.target
  • Create the kubelet.conf configuration file
[root@node1 kubernetes]# cat kubelet.conf
# on the other nodes, change --hostname-override to the node's own IP address
KUBELET_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig \
--config=/etc/kubernetes/kubelet.config \
--hostname-override=192.168.21.31 \
--logtostderr=false --log-dir=/var/log/kubernetes --v=0 \
--runtime-cgroups=/systemd/system.slice \
--kubelet-cgroups=/systemd/system.slice"
[root@node1 kubernetes]# cat kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
  - 172.16.0.100
clusterDomain: cluster.local
authentication:
  anonymous:
    enabled: true
  • Start kubelet.service and enable it at boot
[root@node1 ~]# systemctl start kubelet.service && systemctl enable kubelet.service
[root@node1 ~]# ps aux |grep kubelet
root      4821  0.0  0.0 112716  2264 pts/0    S+   12:02   0:00 grep --color=auto kubelet
root     21424  0.6  2.2 1168096 91636 ?       Ssl  10:59   0:24 /usr/bin/kubelet --kubeconfig=/etc/kubernetes/kubeconfig --config=/etc/kubernetes/kubelet.config --hostname-override=192.168.21.31 --logtostderr=false --log-dir=/var/log/kubernetes --v=0 --runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice
  • Check the running process
[root@node1 ~]# ps aux |grep kubelet
root     17062  0.0  0.0 112716  2188 pts/0    S+   12:53   0:00 grep --color=auto kubelet
root     21424  0.6  2.3 1168096 93684 ?       Ssl  10:59   0:43 /usr/bin/kubelet --kubeconfig=/etc/kubernetes/kubeconfig --config=/etc/kubernetes/kubelet.config --hostname-override=192.168.21.31 --logtostderr=false --log-dir=/var/log/kubernetes --v=0 --runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice

3. Deploy the kube-proxy service

  • Create the kube-proxy systemd unit
[root@node1 ~]# cat /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/kube-proxy.conf
ExecStart=/usr/bin/kube-proxy $KUBE_PROXY_ARGS
Restart=always

[Install]
WantedBy=multi-user.target
  • Create the kube-proxy configuration file
[root@node1 ~]# cat /etc/kubernetes/kube-proxy.conf
# on the other nodes, change --hostname-override to the node's own IP address
KUBE_PROXY_ARGS="--kubeconfig /etc/kubernetes/kubeconfig \
--hostname-override 192.168.21.31 \
--proxy-mode iptables \
--logtostderr=false \
--log-dir /var/log/kubernetes \
--v=0"
  • Start kube-proxy.service and enable it at boot
[root@node3 ~]# systemctl enable kube-proxy.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@node3 ~]# systemctl start kube-proxy.service
  • Check the running process
[root@node1 ~]# ps aux |grep kube
root      3451  0.0  1.0 743152 41760 ?        Ssl  11:57   0:01 /usr/bin/kube-proxy --kubeconfig /etc/kubernetes/kubeconfig --hostname-override 192.168.21.31 --proxy-mode iptables --logtostderr=false --log-dir /var/log/kubernetes --v=0

VII. Deploy CoreDNS

1. Create the resource manifest

CoreDNS needs three resource objects: a ConfigMap, a Deployment, and a Service. Create a coredns.yaml file containing all three:

apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    cluster.local {
      errors
      health {
        lameduck 5s
      }
      ready
      kubernetes cluster.local 172.16.0.0/16 {
        fallthrough in-addr.arpa ip6.arpa
      }
      prometheus: 9153
      forward . /etc/resolv.conf
      cache 30
      loop
      reload
      loadbalance
    }
    . {
      cache 30
      loadbalance
      forward . /etc/resolv.conf
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/name: "CoreDNS"
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly","operator":"Exists"}]'
    spec:
      containers:
      - name: coredns
        image: 192.168.21.34/release/coredns:latest
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 172.16.0.100
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

2. Create the CoreDNS resources

[root@node1 coredns]# kubectl create -f coredns.yaml
configmap/coredns created
deployment.apps/coredns created
service/coredns created

Check the status of each resource:

[root@node1 coredns]# kubectl get all --namespace=kube-system
NAME                           READY   STATUS             RESTARTS   AGE
pod/coredns-7bff699665-zfj5r   0/1     CrashLoopBackOff   6          10m

NAME              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                  AGE
service/coredns   ClusterIP   172.16.0.100   <none>        53/UDP,53/TCP,9153/TCP   10m

NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns   0/1     1            0           10m

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-7bff699665   1         1         0       10m

Something is off: the Pod and Deployment never become ready. Check the Pod's logs first:

[root@node1 coredns]# kubectl logs pod/coredns-7bff699665-zfj5r --namespace=kube-system
/etc/coredns/Corefile:10 - Error during parsing: Unknown directive 'prometheus:'

There is an unrecognized directive (prometheus:) in the Corefile (the valid syntax would be "prometheus :9153"), so simply comment out that line in coredns.yaml:

      ready
      kubernetes cluster.local 172.16.0.0/16 {
        fallthrough in-addr.arpa ip6.arpa
      }
      #prometheus: 9153
      forward . /etc/resolv.conf

Apply the file again:

[root@node1 coredns]# kubectl apply -f coredns.yaml
configmap/coredns configured
deployment.apps/coredns unchanged
service/coredns unchanged

The status is now healthy:

[root@node1 coredns]# kubectl get all --namespace=kube-system
NAME                           READY   STATUS    RESTARTS   AGE
pod/coredns-7bff699665-zfj5r   1/1     Running   11         61m

NAME              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                  AGE
service/coredns   ClusterIP   172.16.0.100   <none>        53/UDP,53/TCP,9153/TCP   61m

NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns   1/1     1            1           61m

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-7bff699665   1         1         1       61m

3. Verify CoreDNS

Create an nginx Pod and Service:

[root@node1 k8s]# cat svc/coredns-test.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: 192.168.21.34/release/nginx:v1.21.1
    ports:
    - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  selector:
    app: nginx
[root@node1 svc]# kubectl create -f coredns-test.yaml
pod/nginx created
service/nginx created

Then create a Pod named myweb:

[root@node1 pod]# cat nginx-pod-1.yaml
apiVersion: v1
kind: Pod
metadata:
  name: myweb
spec:
  containers:
    - name: web
      image: 192.168.21.34/release/nginx:v1.21.1
      imagePullPolicy: IfNotPresent

Exec into the myweb container and curl the nginx service by name:

[root@node1 pod]# kubectl exec -it myweb -- /bin/bash
root@myweb:/# curl nginx
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
root@myweb:/# exit
exit

The service is reachable by its name, nginx. Next, test DNS resolution with nslookup from a busybox Pod:

[root@node1 pod]# cat busybox.yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: 192.168.21.34/release/busybox:latest
    command:
      - sleep
      - "3600"
[root@node1 pod]# kubectl exec busybox -- nslookup nginx
Server:		172.16.0.100
Address:	172.16.0.100:53

Name:	nginx.default.svc.cluster.local
Address: 172.16.28.151

VIII. Verify the cluster

1. Verify node information with kubectl on a master node

kubectl --kubeconfig=/etc/kubernetes/kubeconfig get nodes
NAME            STATUS   ROLES    AGE   VERSION
192.168.21.31   Ready    <none>   24h   v1.19.0
192.168.21.32   Ready    <none>   24h   v1.19.0
192.168.21.33   Ready    <none>   24h   v1.19.0
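A couple of additional checks with the same kubeconfig (a sketch) to confirm the control-plane endpoint and that the kube-system pods are running:

kubectl --kubeconfig=/etc/kubernetes/kubeconfig cluster-info
kubectl --kubeconfig=/etc/kubernetes/kubeconfig get pods --all-namespaces -o wide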

=======================================================================

Knowledge has no boundaries; we grow by sharing.

If you repost this article, please credit the source. Thank you.

=======================================================================