1. 程式人生 > 其它 >Kubernetes-3:使用kubeadm部署k8s環境及常見報錯解決方法

Kubernetes-3:使用kubeadm部署k8s環境及常見報錯解決方法

技術標籤:kubernetskubernetes

k8s叢集安裝

環境說明:

k8s-Master-Centos8 ip:192.168.152.53

k8s-Node1-Centos7 ip:192.168.152.253

k8s-Node2-Centos8 ip:192.168.152.252

注意:

Master與Node節點操作步驟基本一致

Node節點只需配置到本文的第6步即可

1、前期準備

#關閉防火牆
[[email protected] ~]# systemctl stop firewalld.service 

#關閉SElinux,永久關閉可修改/etc/selinux/config檔案
[[email protected] ~]# setenforce 0

#關閉Swap分割槽,防止將K8S安裝至swap記憶體中
[[email protected] ~]# swapoff -a
[[email protected] ~]# sed -i '/swap/ s/^/#/g' /etc/fstab

#安裝iptables並設定為空規則及開機自啟
yum -y install iptables-services iptables
systemctl enable --now iptables.service
iptables -F              #清空規則
service iptables save    #儲存
iptables -L              #檢查是否為空規則

2、升級核心,建議>= 4.4

#—————————————— Centos8 ————————————————#
#——————Centos8最好不要做升級,如果非要要建議也升級到4.4 ————————#
#——————以下只是演示升級過程,真實不要升級到5.6,貌似本身不支援nf_conntrack_ipv4 ————————#
#安裝ELrepo倉庫
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum install https://www.elrepo.org/elrepo-release-8.0-2.el8.elrepo.noarch.rpm

#檢視可用的系統核心包
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available

#安裝核心
yum --enablerepo=elrepo-kernel install kernel-ml

# 安裝完畢後,檢視現有所有核心
grubby --info=ALL

#設定啟動核心
grubby --set-default /boot/vmlinuz-5.6.2-1.el8.elrepo.x86_64

#———————————————— Centos7 ————————————————#
#下載核心rpm包
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

#指定包名安裝新版kernel
yum --enablerepo=elrepo-kernel install -y kernel-lt

#設定預設啟動的核心
grub2-set-default 'CentOS Linux (4.4.218-1.el7.elrepo.x86_64) 7 (Core)'

#———————————————— 以下相同 ————————————————#
#重啟生效
systemctl reboot

#重新檢視核心版本
[[email protected] ~]# uname -r
5.6.2-1.el8.elrepo.x86_64

3、設定kube-proxy開啟ipvs的前置條件(centos7及以上一般預設都開啟)

modprobe br_netfilter

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
# 檢視是否載入成功ipvs模組
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4      20480  4
nf_defrag_ipv4         16384  1 nf_conntrack_ipv4
ip_vs_sh               16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  0
ip_vs                 147456  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          114688  9 ip_vs,nf_nat,nf_nat_ipv4,nf_nat_ipv6,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4,nf_conntrack_ipv6
libcrc32c              16384  2 xfs,ip_vs

4、安裝Docker

#安裝環境
yum install -y yum-utils device-mapper-persistent-data lvm2

#新增docker倉庫
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

#安裝docker
yum -y install docker-ce
報錯:
   package docker-ce-3:19.03.8-3.el7.x86_64 requires containerd.io >= 1.2.2-3, but none of the providers can be installed

先安裝containerd.io >= 1.2.2-3:
dnf install https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm

再次安裝docker:
yum -y install docker-ce

#啟動docker,並設定開機自啟
systemctl enable --now docker.service

#配置daemon.json,設定預設的cgroup組為systemd,並使docker的日誌以json形式輸出
#並將映象倉庫源更改為阿里雲映象源
cat > /etc/docker/daemon.json <<EOF
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m"
    },  "registry-mirrors":["https://f1bhsuge.mirror.aliyuncs.com"]
}
EOF

#建立配置檔案目錄
mkdir -p /etc/systemd/system/docker.service.d

#重新載入daemon 及 重啟docker
systemctl daemon-reload && systemctl restart docker.service

5、安裝Kubeadm(主從配置)

#配置yum倉庫
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF

#安裝kubeadm、kubectl、kubelet
yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1

#設定開機自啟,暫時不開啟服務
systemctl enable kubelet.service

6、拉取初始化映象

vim initimage.sh
...
#!/usr/bin/env bash
# Pull the kubeadm v1.15.1 control-plane images from mirror repositories,
# re-tag them under k8s.gcr.io (the names kubeadm expects), then remove
# the now-redundant mirror tags.
K8S_VERSION=v1.15.1
ETCD_VERSION=3.3.10
DASHBOARD_VERSION=v1.8.3
FLANNEL_VERSION=v0.10.0-amd64
DNS_VERSION=1.3.1
PAUSE_VERSION=3.1

# Parallel arrays: mirror source image -> k8s.gcr.io target image.
SRC_IMAGES=(
  mirrorgooglecontainers/kube-apiserver-amd64:$K8S_VERSION
  mirrorgooglecontainers/kube-controller-manager-amd64:$K8S_VERSION
  mirrorgooglecontainers/kube-scheduler-amd64:$K8S_VERSION
  mirrorgooglecontainers/kube-proxy-amd64:$K8S_VERSION
  mirrorgooglecontainers/etcd-amd64:$ETCD_VERSION
  mirrorgooglecontainers/pause:$PAUSE_VERSION
  coredns/coredns:$DNS_VERSION
)
DST_IMAGES=(
  k8s.gcr.io/kube-apiserver:$K8S_VERSION
  k8s.gcr.io/kube-controller-manager:$K8S_VERSION
  k8s.gcr.io/kube-scheduler:$K8S_VERSION
  k8s.gcr.io/kube-proxy:$K8S_VERSION
  k8s.gcr.io/etcd:$ETCD_VERSION
  k8s.gcr.io/pause:$PAUSE_VERSION
  k8s.gcr.io/coredns:$DNS_VERSION
)

# Pull the base components from the mirrors
for img in "${SRC_IMAGES[@]}"; do
  docker pull "$img"
done

# Re-tag them to the names kubeadm looks for
for idx in "${!SRC_IMAGES[@]}"; do
  docker tag "${SRC_IMAGES[$idx]}" "${DST_IMAGES[$idx]}"
done

# Delete the redundant mirror-tagged images
for img in "${SRC_IMAGES[@]}"; do
  docker rmi "$img"
done
...
chmod +x initimage.sh

#執行此指令碼,開始匯入映象...
./initimage.sh

[[email protected] ]# docker images 
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
quay.io/coreos/flannel               v0.12.0-amd64       4e9f801d2217        4 weeks ago         52.8MB
k8s.gcr.io/kube-scheduler            v1.15.1             b0b3c4c404da        8 months ago        81.1MB
k8s.gcr.io/kube-controller-manager   v1.15.1             d75082f1d121        8 months ago        159MB
k8s.gcr.io/kube-proxy                v1.15.1             89a062da739d        8 months ago        82.4MB
k8s.gcr.io/kube-apiserver            v1.15.1             68c3eb07bfc3        8 months ago        207MB
k8s.gcr.io/coredns                   1.3.1               eb516548c180        15 months ago       40.3MB
k8s.gcr.io/etcd                      3.3.10              2c4adeb21b4f        16 months ago       258MB
k8s.gcr.io/pause                     3.1                 da86e6ba6ca1        2 years ago         742kB

(2)第二種可以在k8s配置檔案初始化後,也就是下邊的第七步之後,再執行匯入映象操作

7、初始化主節點

#列印k8s預設的初始化配置檔案至kubeadm-conf.yaml中
kubeadm config print init-defaults > kubeadm-conf.yaml

#修改kubeadm-conf.yaml
vim kubeadm-conf.yaml
...
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.152.53    #此處需要修改為你的真實ip
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: centos8
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers  #映象站點的更改,也可以不改
kind: ClusterConfiguration
kubernetesVersion: v1.15.1   #版本資訊修改一致
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"  #新增此配置,用於指定flannel的預設PodNet網段
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---    #以下為新增配置,將預設的排程方式改為ipvs
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
...
# 提前拉取配置檔案中所需要的image,對應上邊第6步的(2)
kubeadm config images pull --config kubeadm-conf.yaml

#開始初始化
kubeadm init --config=kubeadm-conf.yaml --experimental-upload-certs | tee kubeadm-init.log
報錯:
    [ERROR NumCPU]: the number of available CPUs 1 is less than the required 2
    很明顯,cpu數目小於所需的2,重新設定一個虛擬機器CPU個數就ok

初始化成功回顯:
To start using your cluster, you need to run the following as a regular user:
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubeadm join 192.168.152.53:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:50ca5375950abfa05cd4bd37dfb60e9ccd078083aeca49fa8bb6275c13d2a2cd 

#根據回顯建立檔案及目錄
#目的為儲存 kubectl 與 api server 互動時的快取,互動過程為https協議
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

#檢視當前擁有節點,可見狀態為NotReady,是因為沒有新增網路
[[email protected] .kube]# kubectl get node 
NAME      STATUS     ROLES    AGE    VERSION
centos8   NotReady   master   4m1s   v1.15.1

8、新增flannel網路

(1)第一種

mkdir -p install-k8s/plugin/flannel
mkdir -p install-k8s/core
cd install-k8s/core
mv /etc/kubernetes/kubeadm-init.log /etc/kubernetes/kubeadm-config.yaml ./
cd ../plugin/flannel
#下載flannel.yml
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
#執行yaml檔案
kubectl create -f kube-flannel.yml
#構建完成,檢視當前名稱空間為kube-system的pod狀態,-n 指定名稱空間
[[email protected] core]# kubectl get pod -n kube-system
NAME                              READY   STATUS             RESTARTS   AGE
coredns-5c98db65d4-5gwmj          0/1     CrashLoopBackOff   22         91m
coredns-5c98db65d4-c277w          0/1     CrashLoopBackOff   22         91m
etcd-centos8                      1/1     Running            0          90m
kube-apiserver-centos8            1/1     Running            0          90m
kube-controller-manager-centos8   1/1     Running            0          90m
kube-flannel-ds-amd64-ggghn       1/1     Running            0          8m45s
kube-proxy-gslw2                  1/1     Running            0          91m
kube-scheduler-centos8            1/1     Running            0          90m

#構建完成後,網絡卡介面會顯示flannel資訊
[[email protected] core]# ifconfig
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 10.244.0.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::4019:beff:fe7c:5582  prefixlen 64  scopeid 0x20<link>
        ether 42:19:be:7c:55:82  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 13 overruns 0  carrier 0  collisions 0
 #網路載入成功,狀態變為Ready

 [[email protected] ~]# kubectl get node 
 NAME   STATUS ROLES AGE VERSION
 centos8 Ready master 53d v1.15.1

(2)第二種,當kubectl create -f kube-flannel.yml 時,映象匯入不成功,或者因為網路問題導致失敗時

# 修改主機hosts檔案
echo "199.232.28.133  raw.githubusercontent.com" >> /etc/hosts

# 然後下載flannel檔案
curl -o kube-flannel.yml   https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# 編輯映象源,預設的映象地址我們修改一下。把yaml檔案中所有的quay.io 修改為quay-mirror.qiniu.com
sed -i 's/quay.io/quay-mirror.qiniu.com/g' kube-flannel.yml

# 最後再執行建立命令就可以啦
kubectl apply -f kube-flannel.yml

9、配置從節點與主節點關聯

# 直接將kubeadm-init.log的最後一句回顯在node節點執行即可
[[email protected] ~]# kubeadm join 192.168.152.53:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:c291b4fc646b5925299f8cdf7fafe33ad9c0505a1609041d8c8214d104eb08da

[[email protected] ~]# kubeadm join 192.168.152.53:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:c291b4fc646b5925299f8cdf7fafe33ad9c0505a1609041d8c8214d104eb08da

遇到的問題及啟動服務報錯:

1、W0412 04:57:19.803140 846 watcher.go:87] Error while processing event ("/sys/fs/cgroup/devices/libcontainer_30695_systemd_test_default.slice": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/devices/libcontainer_30695_systemd_test_default.slice: no such file or directory

## 問題的原因是docker與kubelet的cgroup組不一致
## 檢視kubeadm的配置檔案位置
[[email protected] ~]# rpm -ql kubeadm
/usr/bin/kubeadm
/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf

## 修改配置檔案,將kubelet的cgroup改為systemd
vim /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
...
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml --cgroup-driver=systemd"
...

## 重新載入配置檔案及重啟服務
[[email protected] ~]# systemctl daemon-reload 
[[email protected] ~]# systemctl restart kubelet.service

2、node2節點的flannel一直報錯

## 還未找到根本原因,正在排錯,如有大佬路過,希望指點
kube-flannel-ds-amd64-b47l9       0/1     Init:ErrImagePull   0          3m50s

3、如果需要重新安裝master節點,執行:

# kubeadm reset
然後再進行重新安裝

4、如果需要重新安裝node節點,執行:

# kubectl drain <node name> --delete-local-data --force --ignore-daemonsets
# kubectl delete node <node name>
或
kubeadm reset #直接重新join

5、node節點加入master時報錯:error execution phase preflight: couldn't validate the identity of the API Server: abort connecting to API servers after timeout of 5m0s

原因:master的token過期了,需要重新建立

解決:

Master:
[[email protected] ~]# kubeadm token create
blopur.fn8gtr06gsjlq7yi

Node:
kubeadm join 192.168.152.53:6443 --token blopur.fn8gtr06gsjlq7yi --discovery-token-ca-cert-hash sha256:c291b4fc646b5925299f8cdf7fafe33ad9c0505a1609041d8c8214d104eb08da

6、node2節點加入master後,systemctl status kubelet.service報錯:Unable to update cni config: No networks found in /etc/cni/net.d

vim /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
...
新增:
Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
...

systemctl daemon-reload && systemctl restart kubelet.service

7、node2節點加入master後,systemctl status kubelet.service報錯:open /run/flannel/subnet.env: no such file or directory

## 將master節點的/run/flannel/subnet.env檔案拷貝到此處即可
[[email protected] flannel]# scp subnet.env kubenode2:/run/flannel/

## 重啟
systemctl restart kubelet.service  

補充:

node節點為了方便以後的部署和擴充套件,我採用Ansible自動化安裝docker及k8s。

ansible服務端為k8s的MASTER端:192.168.152.53

客戶端根據node節點需求來控制

#### 注意:所有的操作及檔案都在192.168.152.53中 ####
[[email protected] ansible]# pwd
/root/ansible

*** 先編輯好匯入匯出images的指令碼 ***
[[email protected] ansible]# cat saveImages.sh  #匯出映象指令碼
#!/usr/bin/env bash
# Export the 7 locally-pulled k8s images with `docker save` and copy each
# archive to both node hosts.
# Archives are named by index (0.tar.gz .. 6.tar.gz): image names such as
# "k8s.gcr.io/etcd" contain '/', so they cannot be used as file names.

IMAGESNAME=(`docker images | awk '/ago/{print $1}'`)
IMAGESTAG=(`docker images | awk '/ago/{print $2}'`)
IPADDR1='192.168.152.253'
IPADDR2='192.168.152.252'

# BUG FIX: the original script's else-branch saved the archive to
# /root/images/${IMAGESNAME[$i]}.tar.gz (an invalid path, since the image
# name contains '/') and then scp'd /root/images/${i}.tar.gz, which was
# never created. `mkdir -p` is idempotent, so one loop covers both cases.
mkdir -p /root/images

for i in `seq 0 6`;do
  docker save > /root/images/${i}.tar.gz ${IMAGESNAME[$i]}:${IMAGESTAG[$i]}
  scp /root/images/${i}.tar.gz ${IPADDR1}:/root/
  scp /root/images/${i}.tar.gz ${IPADDR2}:/root/
done

[[email protected] ansible]# cat loadImages.sh  #匯入映象指令碼
#!/usr/bin/env bash
# Import the image archives (0.tar.gz .. 6.tar.gz) copied over by
# saveImages.sh, deleting each archive after a successful load.

for archive in /root/{0..6}.tar.gz; do
  docker load < "$archive" && rm -f "$archive"
done

*** 在本地執行saveImages.sh ***
./saveImages.sh

*** 建立PlayBook ***
[[email protected] ansible]# vim kuber.yaml
...
---
# Install docker + kubeadm/kubectl/kubelet 1.15.1 on all node hosts,
# then pre-load the saved k8s images.
- name: Install docker and k8s
  hosts: all
  tasks:
  - block:
    - name: Add repository
      shell: yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    - name: install docker-ass
      yum:
        name:
          - yum-utils
          - device-mapper-persistent-data
          - lvm2
          - https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm
          - docker-ce
        state: latest
    - name: mkdir docker.service.d
      file:
        path: '{{ item }}'
        state: directory
      loop:
      - /etc/docker
      - /etc/systemd/system/docker.service.d
    - name: Copy daemon.json
      copy:
        src: /etc/docker/daemon.json
        dest: /etc/docker/daemon.json
    - name: daemon-reload
      shell: systemctl daemon-reload
    - name: Start docker
      service:
        name: docker
        state: restarted
        enabled: yes
    rescue:
    - debug:
        msg: 'docker Installation failed!'
  - name: Copy using inline content
    copy:
      # BUG FIX: this must be a literal block scalar (|). The original used a
      # multi-line single-quoted scalar, which YAML folds into ONE line joined
      # by spaces -- producing an invalid kubernetes.repo file.
      content: |
        [kubernetes]
        name=Kubernetes
        baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
        enabled=1
        gpgcheck=0
        repo_gpgcheck=0
        gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
      dest: /etc/yum.repos.d/kubernetes.repo
  - name: Install k8s
    yum:
      name:
        - kubeadm-1.15.1
        - kubectl-1.15.1
        - kubelet-1.15.1
      state: present
  - name: Start kubelet
    service:
      name: kubelet
      state: started
      enabled: yes
  - name: Run script load Images
    script: /root/ansible/loadImages.sh
...

ansible-playbook kuber.yaml  #執行,完成,最後在node節點再執行以上第8步即可