
Detailed k8s Cluster Setup Tutorial [1 master, 2 nodes]

I. Cluster Types

Single master: one master node and multiple worker (Node) nodes
Multiple masters: several master nodes and several worker nodes

II. Installation Methods

minikube: a tool for quickly standing up a single-node k8s environment
kubeadm: a tool for quickly bootstrapping a k8s cluster -- the approach used in this tutorial
Binary installation: download each component's binary package from the official site and install it manually

III. Installation Plan

192.168.2.109  k8s-master-109
192.168.2.110  k8s-node-110
192.168.2.111  k8s-node-111

IV. Environment Setup

docker : 20.10.10
kubeadm: 1.23.1
kubelet: 1.23.1
kubectl: 1.23.1
1. Environment initialization [run on all nodes]
(1) Modify the hosts file
# Add the cluster entries to the hosts file on every host
vim /etc/hosts
192.168.2.109  k8s-master-109
192.168.2.110  k8s-node-110
192.168.2.111  k8s-node-111
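If the machines' hostnames do not yet match these entries, they can be set with hostnamectl (run the matching command on each machine):

# On the master; use k8s-node-110 / k8s-node-111 on the two worker nodes
hostnamectl set-hostname k8s-master-109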
(2) Stop firewalld and iptables
# Stop and disable firewalld
systemctl stop firewalld
systemctl disable firewalld
# Stop and disable iptables
systemctl stop iptables
systemctl disable iptables
(3) Disable SELinux
# Disable immediately (lasts until reboot)
setenforce 0
# Disable permanently (takes effect after reboot)
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
(4) Configure time synchronization
yum install ntpdate -y
# Sync against ntp.aliyun.com once per hour
echo "0 */1 * * * /usr/sbin/ntpdate ntp.aliyun.com" >> /var/spool/cron/root
(5) Disable the swap partition
# Turn swap off immediately
swapoff -a
# Edit /etc/fstab and comment out the swap entry so it stays off after reboot
vim /etc/fstab
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
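If you prefer to comment out the swap entry non-interactively instead of editing fstab by hand, a sed one-liner along these lines should work (a minimal sketch; double-check /etc/fstab afterwards):

# Comment out any non-commented fstab line that mounts swap
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab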
(6) Adjust kernel parameters
# Enable bridge filtering and IP forwarding
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
# Load the bridge netfilter module first, otherwise the net.bridge.* keys cannot be applied
[root@k8s-master-109 ~]# modprobe br_netfilter
# Apply the kernel parameters
sysctl -p
# Verify that the module is loaded
[root@k8s-master-109 ~]# lsmod | grep br_netfilter
br_netfilter           28672  0
(7) Configure IPVS
k8s supports two kube-proxy modes, one based on iptables and one based on ipvs; of the two, ipvs offers better performance.

# Install ipset and ipvsadm
yum install ipset ipvsadm -y

# Load the required kernel modules
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4   # older kernels
modprobe -- nf_conntrack        # newer kernels
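The modprobe commands above only last until the next reboot. To have these modules loaded automatically at boot, one option is a file under /etc/modules-load.d/ (a minimal sketch; the file name is arbitrary, and keep only the nf_conntrack variant that matches your kernel):

cat > /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF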
(8) Reboot the servers
reboot
2. Install Docker [run on all nodes]
Online installation:
yum remove docker  docker-common docker-selinux docker-engine

yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum -y install docker-ce

# Edit the Docker daemon configuration
vim /etc/docker/daemon.json
{
  "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}

# Reload the configuration and start Docker
systemctl daemon-reload
systemctl start docker
systemctl enable docker
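To confirm that Docker picked up the systemd cgroup driver (it must match the kubelet's driver configured below), a quick check:

docker info | grep -i "cgroup driver"
# Expected output: Cgroup Driver: systemd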
3. Install the k8s components [run on all nodes]
# Add the yum repository file
[root@k8s-master-109 ~]# vim /etc/yum.repos.d/k8s.repo 
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

# Install the k8s components kubeadm, kubectl and kubelet, pinned to the planned 1.23.1 version
[root@k8s-master-109 ~]# yum install -y kubeadm-1.23.1 kubelet-1.23.1 kubectl-1.23.1

# Configure the kubelet cgroup driver and proxy mode
vim /etc/sysconfig/kubelet

KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"

# Enable kubelet at boot

systemctl enable kubelet
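At this point it is normal for the kubelet service to keep restarting; it only runs cleanly after kubeadm init/join writes its configuration. To confirm the installed versions match the plan:

kubeadm version -o short
kubelet --version
kubectl version --client --short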
4. Pull the cluster images [run on all nodes]
# List the images this kubeadm version needs, then pull each one from a mirror based on the output
[root@k8s-master-109 ~]# kubeadm config images list  
k8s.gcr.io/kube-apiserver:v1.23.1
k8s.gcr.io/kube-controller-manager:v1.23.1
k8s.gcr.io/kube-scheduler:v1.23.1
k8s.gcr.io/kube-proxy:v1.23.1
k8s.gcr.io/pause:3.6
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6

# Pull the required images from the Aliyun mirror registry instead of k8s.gcr.io
docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.1
docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.1
docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.1
docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.23.1
docker pull registry.aliyuncs.com/google_containers/pause:3.6
docker pull registry.aliyuncs.com/google_containers/etcd:3.5.1-0
docker pull registry.aliyuncs.com/google_containers/coredns:1.8.6

# Re-tag the images with the names kubeadm expects
docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.1   k8s.gcr.io/kube-apiserver:v1.23.1 
docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.1    k8s.gcr.io/kube-controller-manager:v1.23.1
docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.1    k8s.gcr.io/kube-scheduler:v1.23.1
docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.23.1    k8s.gcr.io/kube-proxy:v1.23.1
docker tag registry.aliyuncs.com/google_containers/pause:3.6    k8s.gcr.io/pause:3.6
docker tag registry.aliyuncs.com/google_containers/etcd:3.5.1-0   k8s.gcr.io/etcd:3.5.1-0
docker tag registry.aliyuncs.com/google_containers/coredns:1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6 

# Remove the original Aliyun-tagged images

docker rmi registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.1 
docker rmi registry.aliyuncs.com/google_containers/kube-proxy:v1.23.1
docker rmi registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.1 
docker rmi registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.1
docker rmi registry.aliyuncs.com/google_containers/etcd:3.5.1-0  
docker rmi registry.aliyuncs.com/google_containers/coredns:1.8.6
docker rmi registry.aliyuncs.com/google_containers/pause:3.6
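The repetitive pull/tag/rmi steps can also be scripted in a single loop for everything except coredns (a minimal sketch assuming the Aliyun mirror hosts each image under google_containers with the same tag; coredns keeps the explicit commands above because its repository path and tag differ on the mirror):

for img in $(kubeadm config images list 2>/dev/null | grep -v coredns); do
  name=${img##*/}                                        # e.g. kube-apiserver:v1.23.1
  mirror=registry.aliyuncs.com/google_containers/${name}
  docker pull ${mirror}
  docker tag  ${mirror} ${img}
  docker rmi  ${mirror}
done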
5. Cluster initialization
[run on the master node]
# Initialize the cluster
kubeadm init --kubernetes-version=v1.23.1 --pod-network-cidr=172.26.0.0/16 --service-cidr=10.126.0.0/16 --apiserver-advertise-address=192.168.2.109 

# Record the join command printed at the end of kubeadm init; it is needed to add nodes to the cluster
kubeadm join 192.168.2.109:6443 --token e9cl34.w1nh9tl05pwhh9w3 \
        --discovery-token-ca-cert-hash sha256:649b9f114475b252d16c68ff3558f2a12e42080e187c7b072d19aaab0c84b958 
# If initialization fails, run kubeadm reset to roll back before retrying
# Create the kubeconfig file that kubectl reads
[root@k8s-master-109 ~]# mkdir -p $HOME/.kube
[root@k8s-master-109 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master-109 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[run on the worker nodes]
# Log in to each worker node and run the join command to add it to the cluster
kubeadm join 192.168.2.109:6443 --token e9cl34.w1nh9tl05pwhh9w3 \
        --discovery-token-ca-cert-hash sha256:649b9f114475b252d16c68ff3558f2a12e42080e187c7b072d19aaab0c84b958
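The bootstrap token in the join command expires after 24 hours by default. If a node is added later, a fresh join command can be generated on the master with:

kubeadm token create --print-join-command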
6. Install the network plugin [run on the master node]
# k8s supports several network plugins, such as flannel, calico and canal; this tutorial uses calico
# Download the calico manifest
wget --no-check-certificate https://docs.projectcalico.org/manifests/calico.yaml

# Edit the CALICO_IPV4POOL_CIDR entry (uncomment it if necessary); its value must match the --pod-network-cidr passed to kubeadm init
[root@k8s-master-109 ~]# vim calico.yaml

- name: CALICO_IPV4POOL_CIDR
  value: "172.26.0.0/16"
# Install the network plugin
[root@k8s-master-109 ~]# kubectl apply -f calico.yaml

# Check pod status; the cluster is usable once all pods are Running
kubectl get pods -n kube-system
kubectl get pod -o wide -nkube-system
kubectl get pods --all-namespaces
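Once the calico pods are Running, the nodes should move from NotReady to Ready, which can be confirmed with:

kubectl get nodes
# All three nodes should report STATUS Ready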
7. Enable IPVS
# Switch kube-proxy to ipvs mode
[root@k8s-master-109 ~]# kubectl edit cm kube-proxy -n kube-system
# Change mode: "" to mode: "ipvs" and save
# Recreate the kube-proxy pods so the new mode takes effect
[root@k8s-master-109 ~]# kubectl delete pod -l k8s-app=kube-proxy -n kube-system
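To verify that kube-proxy is now running in ipvs mode, the virtual server table should no longer be empty and the kube-proxy logs should mention the ipvs proxier:

ipvsadm -Ln
kubectl logs -n kube-system -l k8s-app=kube-proxy | grep -i ipvs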