
Notes on Installing k8s with Kubeadm


Environment

docker: 20.10.8
k8s: 1.21.0
kubeadm: 1.21.4
kubelet: 1.21.4
kubectl: 1.21.4
OS: CentOS Linux release 8.4.2105
Network plugin: flannel
Two virtual machines:
	192.168.5.128  k8s-master
	192.168.5.129  k8s-node-1
Each with 2 CPU cores / 2 GB RAM / 20 GB disk

Prerequisites

These steps must be performed on every node.

Requirements

CPU: two cores or more recommended
Memory: no less than 2 GB
MAC address: must be unique across nodes (a quick check follows this list)
Swap: disabled
Unobstructed network connectivity between nodes
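
A quick way to check these up front (the product_uuid comparison comes from the upstream kubeadm prerequisites; run on every node and compare results):

ip link                              # MAC addresses must differ across nodes
cat /sys/class/dmi/id/product_uuid   # product UUIDs must differ across nodes
ping -c 3 192.168.5.129              # e.g. from k8s-master, confirm the node is reachable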

Set the hostname

Set each node's hostname to its own name:

hostnamectl set-hostname <name>
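
For the two VMs in this article, that is:

hostnamectl set-hostname k8s-master   # on 192.168.5.128
hostnamectl set-hostname k8s-node-1   # on 192.168.5.129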

Edit hosts

Configure the IP-to-hostname mapping on every node:

# vim /etc/hosts
192.168.5.128 k8s-master
192.168.5.129 k8s-node-1

Disable the firewall, enable the kernel network parameters

systemctl stop firewalld
systemctl disable firewalld

# vi /etc/sysctl.conf  # edit the config file
# append the following two lines
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1

sysctl -p  # apply the settings
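
These bridge sysctls only exist once the br_netfilter kernel module is loaded; if sysctl -p reports a missing key, a sketch like this loads it now and on every boot (the file name k8s.conf is arbitrary):

modprobe br_netfilter                              # load the module immediately
echo br_netfilter > /etc/modules-load.d/k8s.conf   # auto-load it at boot
sysctl -p                                          # re-apply the settings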

Disable SELinux

setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
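
To confirm the change took effect:

getenforce   # prints Permissive now, and Disabled after the reboot below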

Disable swap

Comment out the line containing swap in /etc/fstab, as in this example:

# vim /etc/fstab
# Created by anaconda on Wed Jan  6 20:22:34 2021
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       ext4    defaults        1 1
UUID=b6a81016-1920-44c6-b713-2547ccbc9adf /boot                   ext4    defaults        1 2
/dev/mapper/centos-home /home                   ext4    defaults        1 2
# /dev/mapper/centos-swap swap                    swap    defaults        0 0
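
Commenting out fstab only stops swap from mounting at the next boot; to turn it off in the current session as well:

swapoff -a   # disable all swap devices immediately
free -h      # the Swap line should now read 0B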

Reboot

reboot

Install Docker

Docker must be installed on every node, with the service enabled at boot.

# Remove any docker packages already on the machine
yum remove docker \
    docker-client \
    docker-client-latest \
    docker-common \
    docker-latest \
    docker-latest-logrotate \
    docker-logrotate \
    docker-engine
    
# Install dependencies
yum install -y yum-utils \
    device-mapper-persistent-data \
    lvm2
# Add the mirror repo
yum-config-manager \
    --add-repo \
    https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo
# Install (latest version)
yum install docker-ce docker-ce-cli containerd.io -y
# To install a specific docker version (other packages work the same way), list the candidates:
yum list docker-ce --showduplicates | sort -r
# Last metadata expiration check: 0:32:36 ago on Mon 16 Aug 2021 02:15:13 PM CST.
# Installed Packages
# docker-ce.x86_64               3:20.10.8-3.el8                 docker-ce-stable
# docker-ce.x86_64               3:20.10.8-3.el8                 @docker-ce-stable
# docker-ce.x86_64               3:20.10.7-3.el8                 docker-ce-stable
# docker-ce.x86_64               3:20.10.6-3.el8                 docker-ce-stable
# docker-ce.x86_64               3:20.10.5-3.el8                 docker-ce-stable
# docker-ce.x86_64               3:20.10.4-3.el8                 docker-ce-stable
# docker-ce.x86_64               3:20.10.3-3.el8                 docker-ce-stable
# docker-ce.x86_64               3:20.10.2-3.el8                 docker-ce-stable
# docker-ce.x86_64               3:20.10.1-3.el8                 docker-ce-stable
# docker-ce.x86_64               3:20.10.0-3.el8                 docker-ce-stable
# docker-ce.x86_64               3:19.03.15-3.el8                docker-ce-stable
# docker-ce.x86_64               3:19.03.14-3.el8                docker-ce-stable
# docker-ce.x86_64               3:19.03.13-3.el8                docker-ce-stable
# Available Packages
# Pick one of the versions listed above; here, the latest 20.10.8
yum install docker-ce-20.10.8-3.el8

# Start the service and enable it at boot
systemctl start docker
systemctl enable docker
# Switch docker's registry mirrors
# vim /etc/docker/daemon.json
{
    "registry-mirrors" : [
    "https://registry.docker-cn.com",
    "https://docker.mirrors.ustc.edu.cn",
    "http://hub-mirror.c.163.com",
    "https://cr.console.aliyun.com/"
  ]
}
# If the current user is not root, add them to the docker group
# After joining the group, log out and back in (or reboot) before docker works without sudo
sudo usermod -aG docker <your username>
# Restart docker
sudo systemctl restart docker
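
One optional tweak while daemon.json is open: the Kubernetes docs recommend the systemd cgroup driver for docker on systemd-based distros such as CentOS, so it matches the kubelet's driver. A sketch of the extra key (merge it into the JSON above rather than replacing it):

# /etc/docker/daemon.json (additional key)
#   "exec-opts": ["native.cgroupdriver=systemd"]
# then restart and verify:
systemctl restart docker
docker info | grep -i cgroup   # should report: Cgroup Driver: systemd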

Install kubeadm, kubelet, kubectl

These three components have version compatibility requirements.

See the official documentation for the specifics.

All three tools must be installed on every node.

Add the package repository

# vim /etc/yum.repos.d/kubernetes.repo

# Contents; note that gpgkey is a single line, with the two https URLs separated by a space
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

# Clean the cache and rebuild it
yum clean all && yum makecache

Install

As of: August 16, 2021

yum install -y kubelet-1.21.4 kubeadm-1.21.4 kubectl-1.21.4
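
A quick sanity check that the expected versions landed, plus enabling kubelet (it will crash-loop until kubeadm init or join runs, which is normal at this stage):

kubeadm version -o short    # expect v1.21.4
kubectl version --client    # expect v1.21.4
systemctl enable --now kubelet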

Prepare to initialize the cluster (master node)

Print the default init configuration and export it to a file:

kubeadm config print init-defaults > init-defaults.yaml

Edit it as indicated by the comments in the example below:

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456780abcdef   # token setting
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.5.128   # the master's externally reachable IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master   # master node name; it must resolve via the hosts file
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/k8sxio   # switch the image repository
kind: ClusterConfiguration
kubernetesVersion: 1.21.0   # the k8s version to install
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.244.0.0/16   # flannel's default subnet
scheduler: {}
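
Before initializing for real, kubeadm can validate the edited file without touching the host; a quick sketch using the built-in dry-run mode:

kubeadm init --config init-defaults.yaml --dry-run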

List and pull the images

You can pull the images ahead of time and load them into your local docker.

# List which images are needed. The tags follow kubernetesVersion in the config
# file; the v1.22.0 tags below are from a run against a different config, while
# the pull output further down shows the v1.21.0 tags used in this install.
kubeadm config images list --config init-defaults.yaml
# registry.aliyuncs.com/k8sxio/kube-apiserver:v1.22.0
# registry.aliyuncs.com/k8sxio/kube-controller-manager:v1.22.0
# registry.aliyuncs.com/k8sxio/kube-scheduler:v1.22.0
# registry.aliyuncs.com/k8sxio/kube-proxy:v1.22.0
# registry.aliyuncs.com/k8sxio/pause:3.5
# registry.aliyuncs.com/k8sxio/etcd:3.5.0-0
# registry.aliyuncs.com/k8sxio/coredns:v1.8.4

# Pull the images up front, so the install doesn't fail as a whole because one image fails to download
kubeadm config images pull --config init-defaults.yaml
# If you hit a pull error like the one below, try searching docker hub for the image directly, then re-tag it with docker tag
# [root@k8s-master k8s-install-file]# kubeadm config images pull --config init-defaults.yaml
# [config/images] Pulled registry.aliyuncs.com/k8sxio/kube-apiserver:v1.21.0
# [config/images] Pulled registry.aliyuncs.com/k8sxio/kube-controller-manager:v1.21.0
# [config/images] Pulled registry.aliyuncs.com/k8sxio/kube-scheduler:v1.21.0
# [config/images] Pulled registry.aliyuncs.com/k8sxio/kube-proxy:v1.21.0
# [config/images] Pulled registry.aliyuncs.com/k8sxio/pause:3.4.1
# [config/images] Pulled registry.aliyuncs.com/k8sxio/etcd:3.4.13-0
# failed to pull image "registry.aliyuncs.com/k8sxio/coredns:v1.8.0": output: Error response from daemon: manifest for registry.aliyuncs.com/k8sxio/coredns:v1.8.0 not found: manifest unknown: manifest unknown
# , error: exit status 1
# To see the stack trace of this error execute with --v=5 or higher
# Search for the image
# [root@k8s-master k8s-install-file]# docker search coredns:v1.8.0
# NAME                       DESCRIPTION                              STARS     OFFICIAL   AUTOMATED
# louwy001/coredns-coredns   k8s.gcr.io/coredns/coredns:v1.8.0        1
# ninokop/coredns            k8s.gcr.io/coredns/coredns:v1.8.0        0
# xwjh/coredns               from k8s.gcr.io/coredns/coredns:v1.8.0   0
# hhhlhh/coredns-coredns     FROM k8s.gcr.io/coredns/coredns:v1.8.0   0
# suxishuo/coredns           k8s.gcr.io/coredns/coredns:v1.8.0        0
# fengbb/coredns             k8s.gcr.io/coredns/coredns:v1.8.0        0
# Pull the image
# [root@k8s-master k8s-install-file]# docker pull louwy001/coredns-coredns:v1.8.0
# v1.8.0: Pulling from louwy001/coredns-coredns
# c6568d217a00: Pull complete
# 5984b6d55edf: Pull complete
# Digest: sha256:10ecc12177735e5a6fd6fa0127202776128d860ed7ab0341780ddaeb1f6dfe61
# Status: Downloaded newer image for louwy001/coredns-coredns:v1.8.0
# docker.io/louwy001/coredns-coredns:v1.8.0
# Re-tag it and remove the redundant tag
# [root@k8s-master k8s-install-file]# docker tag louwy001/coredns-coredns:v1.8.0 registry.aliyuncs.com/k8sxio/coredns:v1.8.0
# [root@k8s-master k8s-install-file]#
# [root@k8s-master k8s-install-file]# docker rmi louwy001/coredns-coredns:v1.8.0
# Untagged: louwy001/coredns-coredns:v1.8.0
# Untagged: louwy001/coredns-coredns@sha256:10ecc12177735e5a6fd6fa0127202776128d860ed7ab0341780ddaeb1f6dfe61
# [root@k8s-master k8s-install-file]#

Tearing down the cluster

If cluster initialization failed, or a parameter was wrong, run the commands below on the affected node to undo the setup:

kubeadm reset
iptables -F 
iptables -X
ipvsadm -C
rm -rf /etc/cni/net.d
rm -rf $HOME/.kube/config 

Initialize

kubeadm init --config init-defaults.yaml

After initialization completes, run the setup steps it prints, and record the join command and its parameters.

# Cluster config file
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Enable kubelet at boot
systemctl enable kubelet.service

# Join the cluster
kubeadm join 192.168.5.128:6443 --token abcdef.0123456780abcdef \
	--discovery-token-ca-cert-hash sha256:d27cf2fd4a45c3ce8c59cdf0163edbf7cd4bc55a994a34404c0e175a47770798
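
The token above expires after the ttl set in the config (24h here). If you join a node later and the token has expired, generate a fresh join command on the master:

kubeadm token create --print-join-command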

Joining other nodes to the cluster

Confirm kubeadm, kubelet, and kubectl are installed.

On each node machine, run the join command printed above, and enable kubelet at boot.

Copy the cluster config file from the master to the node so the node can use kubectl normally; this step is optional.

systemctl enable kubelet.service
ssh k8s-node-1 "mkdir -p ~/.kube"   # the target directory must exist first
scp /etc/kubernetes/admin.conf k8s-node-1:~/.kube/config

Configure the Flannel network

Install flannel so pods on different nodes can talk to each other.

Edit the cluster's kube-controller-manager.yaml and append the network parameters:

vim /etc/kubernetes/manifests/kube-controller-manager.yaml
# append two entries under the command: list (note the leading dashes)
- --allocate-node-cidrs=true
- --cluster-cidr=10.244.0.0/16

# Restart kubelet
systemctl restart kubelet

If the machine has multiple network interfaces, you may need to specify the interface; see the "安裝 Pod Network" section of the linked article, which notes:

"Also note that if your node has multiple network interfaces, you need to use the --iface argument in kube-flannel.yml to specify the name of the host's cluster-internal interface, otherwise DNS may fail to resolve."

I believe it should be added at the location below in the config file; note the kind and metadata fields:

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      ....
      ...
      ..
      .
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr     
        - --iface=ens33    # append the argument here   <----------------
        resources:
          requests:
			...
			....
			......

Fetch the flannel deployment manifest and pull the image

curl https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml > kube-flannel.yml

# Check which images are needed
cat kube-flannel.yml | grep image
# image: quay.io/coreos/flannel:v0.14.0
# image: quay.io/coreos/flannel:v0.14.0

# If pulling directly fails, search docker hub for a copy someone else has uploaded
docker search flannel:v0.14.0
# NAME           DESCRIPTION                           STARS     OFFICIAL   AUTOMATED
# xwjh/flannel   from quay.io/coreos/flannel:v0.14.0   1

# Pull the image, re-tag it, then remove the redundant tag
docker pull xwjh/flannel:v0.14.0
docker tag xwjh/flannel:v0.14.0 quay.io/coreos/flannel:v0.14.0
docker rmi xwjh/flannel:v0.14.0

# Apply the manifest
kubectl create -f kube-flannel.yml

# [root@k8s-master k8s-install-file]# kubectl create -f kube-flannel.yml
# Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
# podsecuritypolicy.policy/psp.flannel.unprivileged created
# clusterrole.rbac.authorization.k8s.io/flannel created
# clusterrolebinding.rbac.authorization.k8s.io/flannel created
# serviceaccount/flannel created
# configmap/kube-flannel-cfg created
# daemonset.apps/kube-flannel-ds created
# [root@k8s-master k8s-install-file]#
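
To confirm the DaemonSet landed on both nodes (the app=flannel label is visible in the manifest excerpt above):

kubectl -n kube-system get pods -l app=flannel -o wide
# one kube-flannel-ds pod per node should reach Running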

Verification & other settings

This wraps up the basic k8s setup. For more nodes, multiple masters, and so on, look up the docs and join them to the cluster the same way.

Verify node status

Run kubectl get node to check the cluster's node status. If you run it before installing flannel, you will see this:

[root@k8s-master ~]# kubectl get node
NAME         STATUS     ROLES                  AGE   VERSION
k8s-master   NotReady   control-plane,master   21h   v1.21.4
k8s-node-1   NotReady   <none>                 21h   v1.21.4
[root@k8s-master ~]#

Once flannel is installed correctly, the output changes to the following, with both nodes Ready:

[root@k8s-master k8s-install-file]# kubectl get node
NAME         STATUS   ROLES                  AGE   VERSION
k8s-master   Ready    control-plane,master   22h   v1.21.4
k8s-node-1   Ready    <none>                 21h   v1.21.4
[root@k8s-master k8s-install-file]#

Verify coredns status

After the install, checking pod status may show coredns failing to start:

[root@k8s-master k8s-install-file]# kubectl  get pod --all-namespaces
NAMESPACE     NAME                                 READY   STATUS             RESTARTS   AGE
kube-system   coredns-67574f65b-fh2kq              0/1     ImagePullBackOff   0          22h
kube-system   coredns-67574f65b-qspjm              0/1     ImagePullBackOff   0          22h
kube-system   etcd-k8s-master                      1/1     Running            1          22h
kube-system   kube-apiserver-k8s-master            1/1     Running            1          22h
kube-system   kube-controller-manager-k8s-master   1/1     Running            1          5h44m
kube-system   kube-flannel-ds-h5fd6                1/1     Running            0          7m33s
kube-system   kube-flannel-ds-z945p                1/1     Running            0          7m33s
kube-system   kube-proxy-rmwcx                     1/1     Running            1          21h
kube-system   kube-proxy-vzmjw                     1/1     Running            1          22h
kube-system   kube-scheduler-k8s-master            1/1     Running            1          22h
[root@k8s-master k8s-install-file]#

Check the pod's error details:

[root@k8s-master k8s-install-file]# kubectl -n kube-system describe pod coredns-67574f65b-fh2kq
Name:                 coredns-67574f65b-fh2kq
Namespace:            kube-system
Priority:             2000000000
Priority Class Name:  system-cluster-critical
Node:                 k8s-node-1/192.168.5.129
Start Time:           Tue, 17 Aug 2021 14:54:36 +0800
Labels:               k8s-app=kube-dns
                      pod-template-hash=67574f65b
Annotations:          <none>
Status:               Pending
IP:                   10.244.1.3
IPs:
  IP:           10.244.1.3
Controlled By:  ReplicaSet/coredns-67574f65b
Containers:
  coredns:
    Container ID:
    Image:         registry.aliyuncs.com/k8sxio/coredns:v1.8.0
    Image ID:
    Ports:         53/UDP, 53/TCP, 9153/TCP
    Host Ports:    0/UDP, 0/TCP, 0/TCP
    Args:
      -conf
      /etc/coredns/Corefile
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Limits:
      memory:  170Mi
    Requests:
      cpu:        100m
      memory:     70Mi
    Liveness:     http-get http://:8080/health delay=60s timeout=5s period=10s #success=1 #failure=5
    Readiness:    http-get http://:8181/ready delay=0s timeout=1s period=10s #success=1 #failure=3
    Environment:  <none>
    Mounts:
      /etc/coredns from config-volume (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-trjcg (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             False
  ContainersReady   False
  PodScheduled      True
Volumes:
  config-volume:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      coredns
    Optional:  false
  kube-api-access-trjcg:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              kubernetes.io/os=linux
Tolerations:                 CriticalAddonsOnly op=Exists
                             node-role.kubernetes.io/control-plane:NoSchedule
                             node-role.kubernetes.io/master:NoSchedule
                             node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason            Age                     From               Message
  ----     ------            ----                    ----               -------
  Warning  FailedScheduling  4h53m (x1020 over 21h)  default-scheduler  0/2 nodes are available: 2 node(s) had taint {node.kubernetes.io/not-ready:}, that the pod didn't tolerate.
  Warning  FailedScheduling  8m6s (x9 over 14m)      default-scheduler  0/2 nodes are available: 2 node(s) had taint {node.kubernetes.io/not-ready:}, that the pod didn't tolerate.
  Normal   Scheduled         7m56s                   default-scheduler  Successfully assigned kube-system/coredns-67574f65b-fh2kq to k8s-node-1
  Normal   Pulling           6m27s (x4 over 7m54s)   kubelet            Pulling image "registry.aliyuncs.com/k8sxio/coredns:v1.8.0"
  Warning  Failed            6m26s (x4 over 7m53s)   kubelet            Failed to pull image "registry.aliyuncs.com/k8sxio/coredns:v1.8.0": rpc error: code = Unknown desc = Error response from daemon: manifest for registry.aliyuncs.com/k8sxio/coredns:v1.8.0 not found: manifest unknown: manifest unknown
  Warning  Failed            6m26s (x4 over 7m53s)   kubelet            Error: ErrImagePull
  Warning  Failed            6m15s (x6 over 7m53s)   kubelet            Error: ImagePullBackOff
  Normal   BackOff           2m45s (x21 over 7m53s)  kubelet            Back-off pulling image "registry.aliyuncs.com/k8sxio/coredns:v1.8.0"

The error is an image pull failure. The master does have this image, so it is the node that is missing it: export registry.aliyuncs.com/k8sxio/coredns:v1.8.0 from the master, copy it over, and load it on the node.

docker save -o coredns.zip registry.aliyuncs.com/k8sxio/coredns:v1.8.0
scp coredns.zip k8s-node-1:~

# on the node
docker load -i coredns.zip
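
kubelet retries the pull on its own with increasing back-off, so the pods will recover eventually; to make them retry immediately, you can delete them and let the ReplicaSet recreate them (the k8s-app=kube-dns label appears in the describe output above):

kubectl -n kube-system delete pod -l k8s-app=kube-dns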

Check the status again

[root@k8s-master k8s-install-file]# kubectl -n kube-system get pods
NAME                                 READY   STATUS    RESTARTS   AGE
coredns-67574f65b-fh2kq              1/1     Running   0          22h
coredns-67574f65b-qspjm              1/1     Running   0          22h
etcd-k8s-master                      1/1     Running   1          22h
kube-apiserver-k8s-master            1/1     Running   1          22h
kube-controller-manager-k8s-master   1/1     Running   1          5h58m
kube-flannel-ds-h5fd6                1/1     Running   0          21m
kube-flannel-ds-z945p                1/1     Running   0          21m
kube-proxy-rmwcx                     1/1     Running   1          22h
kube-proxy-vzmjw                     1/1     Running   1          22h
kube-scheduler-k8s-master            1/1     Running   1          22h
[root@k8s-master k8s-install-file]#

Node role shows <none>

Looking at the detailed node info, the worker's role is <none>; we assign it the node role manually.

[root@k8s-master k8s-install-file]# kubectl get node -o wide
NAME         STATUS   ROLES                  AGE   VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE         KERNEL-VERSION                 CONTAINER-RUNTIME
k8s-master   Ready    control-plane,master   22h   v1.21.4   192.168.5.128   <none>        CentOS Linux 8   4.18.0-305.12.1.el8_4.x86_64   docker://20.10.8
k8s-node-1   Ready    <none>                 22h   v1.21.4   192.168.5.129   <none>        CentOS Linux 8   4.18.0-305.12.1.el8_4.x86_64   docker://20.10.8
[root@k8s-master k8s-install-file]#

Run the following command to change the node's role:

kubectl label node <node name> node-role.kubernetes.io/node=
[root@k8s-master k8s-install-file]# kubectl label node k8s-node-1 node-role.kubernetes.io/node=
node/k8s-node-1 labeled
[root@k8s-master k8s-install-file]#
[root@k8s-master k8s-install-file]# kubectl get node
NAME         STATUS   ROLES                  AGE   VERSION
k8s-master   Ready    control-plane,master   22h   v1.21.4
k8s-node-1   Ready    node                   22h   v1.21.4
[root@k8s-master k8s-install-file]#

Setting node roles

# Set a node as master
kubectl label node <node name> node-role.kubernetes.io/master=

# Set a worker to the node role
kubectl label node <node name> node-role.kubernetes.io/node=

# Mark master as not accepting workloads in the usual case
kubectl taint node <node name> node-role.kubernetes.io/master=true:NoSchedule

# Mark master as not running pods
kubectl taint node <node name> node-role.kubernetes.io/master=:NoSchedule

# To remove a node label (role), just change the = to a -
kubectl label node k8s-node-1 node-role.kubernetes.io/node-

Allow pods to run on all nodes

See the official manual.

kubectl taint nodes --all node-role.kubernetes.io/master-

# Running it more than once prints the message below; for the normal output see the official docs
# [root@k8s-master k8s-install-file]# kubectl taint nodes --all node-role.kubernetes.io/master-
# taint "node-role.kubernetes.io/master" not found
# taint "node-role.kubernetes.io/master" not found
# [root@k8s-master k8s-install-file]#

Change the NodePort port range

The default range is 30000-32767.

The change takes effect a short while after the edit.

# vim /etc/kubernetes/manifests/kube-apiserver.yaml
# append the following argument at the end of the command: list to open the full 0-65535 range
- --service-node-port-range=0-65535
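
Once the apiserver static pod has restarted, you can confirm the wider range with a throwaway service that uses a port the default range would reject (the name test-np and port 8080 here are arbitrary):

kubectl create service nodeport test-np --tcp=80:80 --node-port=8080
kubectl get svc test-np      # PORT(S) should show 80:8080/TCP
kubectl delete svc test-np   # clean up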