1. 程式人生 > 其它 >k8s叢集安裝v1

k8s叢集安裝v1

k8s安裝v1

k8s安裝

主機 IP 備註
node81 192.168.0.81 master
node82 192.168.0.82 node
node83 192.168.0.83 node

docker資訊

配置資訊 備註
系統版本 CentOS Linux release 7.6.1810
Docker版本 20.10
Pod網段 172.16.0.0/12
Service網段 10.96.0.0/16
#Shell prompt tuning: colored PS1 showing user@host[HH:MM:SS]:cwd, persisted in /etc/profile
echo "export PS1='\[\033[01;31m\]\u\[\033[00m\]@\[\033[01;32m\]\h\[\033[00m\][\[\033[01;33m\]\t\[\033[00m\]]:\[\033[01;34m\]\w\[\033[00m\]$ '" >>/etc/profile
source /etc/profile

#History tuning: timestamp every shell-history entry (applies now and on future logins)
export HISTTIMEFORMAT='%F %T ' 
echo "export HISTTIMEFORMAT='%F %T '" >>/etc/profile 
source /etc/profile 

2、準備工作

設定主機名
hostnamectl set-hostname node81
修改/etc/hosts
root@node81[15:56:41]:~# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.0.81 node81
192.168.0.82 node82
192.168.0.83 node83

節點源配置

cd /etc/yum.repos.d/
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum -y install epel-release yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Aliyun-mirrored Kubernetes yum repository.
# Fix: file renamed from the typo "kubernete.repo" to the conventional
# "kubernetes.repo" so it matches the [kubernetes] repo id below.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo

yum clean all
yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git -y

節點優化

#On ALL nodes: disable firewalld, selinux, dnsmasq and swap (k8s prerequisites):
systemctl disable --now firewalld 
systemctl disable --now dnsmasq
systemctl disable --now NetworkManager

# Disable SELinux immediately and persistently (both paths point at the same config)
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config

#Disable the swap partition now, and comment out every non-commented swap line in fstab
swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab

#Install ntpdate (the wlnmp repo provides it for CentOS 7)
rpm -ivh http://mirrors.wlnmp.com/centos/wlnmp-release-centos.noarch.rpm

#Add a cron job for periodic time sync, then sync once immediately
yum install -y ntpdate
echo '*/5 * * * * ntpdate cn.pool.ntp.org' >>/var/spool/cron/root
systemctl restart crond
ntpdate time2.aliyun.com

#Sync time on all nodes and set the timezone to Asia/Shanghai
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time2.aliyun.com
# Example crontab entry (for reference only — the echo above already installs
# one; a bare crontab line is NOT a shell command and fails if executed):
# */5 * * * * /usr/sbin/ntpdate time2.aliyun.com

#Raise file-descriptor limit for the current shell
ulimit -SHn 65535

# Persistent limits for all users.
# Fix: the original wrote "hard nofile 131072", which is LOWER than the soft
# limit (655360); a soft limit above the hard limit is invalid and PAM will
# reject it at login. Hard nofile raised to match the soft limit.
cat <<EOF >> /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF

免密配置

cd /root
ssh-keygen -t rsa
for i in node81 node82 node83;do ssh-copy-id -p2200 -i .ssh/id_rsa.pub $i;done

升級核心

#下載安裝所有的原始碼檔案
cd /root/ 
git clone https://github.com/dotbalo/k8s-ha-install.git

#如果無法下載請使用下面的重試:
git clone https://gitee.com/dukuan/k8s-ha-install.git

# 在node81節點下載核心,並快取 [所有機器升級]
#CentOS7需要升級系統,CentOS8可以按需升級系統 
yum update -y --exclude=kernel* && reboot

# 核心配置
#CentOS7 需要升級核心至4.18+,本地升級的版本為4.19

cd /root
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm

#Copy the kernel rpms from node81 to the other nodes (ssh listens on 2200).
#Fix: scp's port flag is uppercase -P (lowercase -p means "preserve times"),
#so the original "scp -p2200" was mis-parsed and the copy failed.
for i in node82 node83;do scp -P 2200 kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm $i:/root/ ; done
 
#所有節點安裝核心
cd /root && yum localinstall -y kernel-ml*
 
#所有節點更改核心啟動順序
grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
 
#檢查預設核心是不是4.19
root@node81[14:51:55]:~# grubby --default-kernel
/boot/vmlinuz-4.19.12-1.el7.elrepo.x86_64

#所有節點重啟,然後檢查核心是不是4.19 
root@node81[14:51:55]:~# grubby --default-kernel
/boot/vmlinuz-4.19.12-1.el7.elrepo.x86_64

#如果是,則說明核心配置正確

所有節點安裝ipvsadm

yum install ipvsadm ipset sysstat conntrack libseccomp -y 
 
 
#所有節點配置ipvs模組,在核心4.19+版本nf_conntrack_ipv4已經改為nf_conntrack, 4.18以下使用nf_conntrack_ipv4即可: 
 
modprobe -- ip_vs 
modprobe -- ip_vs_rr 
modprobe -- ip_vs_wrr 
modprobe -- ip_vs_sh 
modprobe -- nf_conntrack 

 
#Create /etc/modules-load.d/ipvs.conf so the ipvs modules load on every boot.
#Fixes vs original: removed the duplicate "ip_vs_sh" entry and stripped the
#trailing whitespace from every line.
cat >/etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF

#設定為開機啟動
systemctl enable --now systemd-modules-load.service

k8s核心優化

#Kernel parameters required by a Kubernetes cluster — configure on ALL nodes.
#Fixes vs original: removed the duplicated "net.ipv4.tcp_max_syn_backlog" line
#and the obsolete "net.ipv4.ip_conntrack_max" key, which does not exist on
#4.19 kernels (sysctl --system reports an error for it); the modern
#"net.netfilter.nf_conntrack_max" is already set below.
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system

#所有節點配置完核心後,重啟伺服器,保證重啟後核心依舊載入

reboot
lsmod | grep --color=auto -e ip_vs -e nf_conntrack


#重啟後結果如下代表正常:
root@node81[15:07:44]:~# lsmod | grep --color=auto -e ip_vs -e nf_conntrack
ip_vs_ftp              16384  0  
nf_nat                 32768  1 ip_vs_ftp 
ip_vs_sed              16384  0  
ip_vs_nq               16384  0  
ip_vs_fo               16384  0  
ip_vs_sh               16384  0  
ip_vs_dh               16384  0  
ip_vs_lblcr            16384  0  
ip_vs_lblc             16384  0  
ip_vs_wrr              16384  0  
ip_vs_rr               16384  0  
ip_vs_wlc              16384  0  
ip_vs_lc               16384  0  
ip_vs                 151552  24 ip_vs_wlc,ip_vs_rr,ip_vs_dh,ip_vs_lblcr,ip_vs_sh,ip_vs_fo,ip_vs_nq,ip_vs_lblc,ip_vs_wrr,ip_vs_lc,ip_vs_sed,ip_vs_ftp 
nf_conntrack          143360  2 nf_nat,ip_vs 
nf_defrag_ipv6         20480  1 nf_conntrack 
nf_defrag_ipv4         16384  1 nf_conntrack 
libcrc32c              16384  4 nf_conntrack,nf_nat,xfs,ip_vs 

安裝docker

#List available versions, then pin Docker to 20.10.10.
yum list docker-ce --showduplicates | sort -r
yum -y install docker-ce-20.10.10-3.el7  docker-ce-cli-20.10.10

mkdir -p /etc/systemd/system/docker.service.d
mkdir -p /data/docker
# Relocate docker's storage to /data/docker with the supported "data-root"
# daemon.json option. The original "ln -s /data/docker /var/lib/docker" is
# fragile: if /var/lib/docker already exists, ln creates the symlink INSIDE
# that directory instead of replacing it.

# -p so a re-run does not fail if the directory already exists
mkdir -p /etc/docker

# \EOF: quoted delimiter — write the JSON literally, no shell expansion
cat > /etc/docker/daemon.json << \EOF
{
  "data-root": "/data/docker",
  "registry-mirrors": ["http://hub-mirror.c.163.com","https://ajvcw8qn.mirror.aliyuncs.com"],
  "insecure-registries": ["http://192.168.0.16:30002"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

systemctl daemon-reload
systemctl start docker
systemctl enable docker

安裝kubeadm

yum list kubeadm.x86_64 --showduplicates | sort -r


#所有節點安裝最新版本kubeadm: 
yum install kubeadm-1.21* kubelet-1.21* kubectl-1.21* -y

#預設配置的pause映象使用gcr.io倉庫,國內可能無法訪問,所以這裡配置Kubelet使用阿里雲的pause映象
cat > /etc/sysconfig/kubelet << \EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.2"
EOF

#設定開機啟動
systemctl daemon-reload
systemctl enable --now kubelet

kubernetes叢集初始化

建立k8s 操作目錄

mkdir /data/k8s
cd /data/k8s
Master節點建立 kubeadm-config.yaml 配置檔案如下:

#檢視辦法:
root@node81[15:16:49]:/data/k8s# kubectl version
Client Version: version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.11", GitCommit:"38d3c1f3d5306401bcf39a71bad3b5a5106033d7", GitTreeState:"clean", BuildDate:"2022-03-16T14:08:11Z", GoVersion:"go1.16.15", Compiler:"gc", Platform:"linux/amd64"}


因為安裝的版本是 GitVersion:"v1.21.11"
下面的yaml檔案中的對應版本需要改為  v1.21.11 

k8s初始化配置

root@node81[17:20:57]:/data/k8s/k8s_init# cat kubeadm-config.yaml 
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  description: "kubeadm bootstrap token"
  ttl: 24h0m0s
  usages:
  - signing
  - authentication

localAPIEndpoint:
# IP of this master host
  advertiseAddress: 192.168.0.81
  bindPort: 6443

nodeRegistration:
  criSocket: /var/run/dockershim.sock
# hostname of this node
  name: node81
  taints: null
# taint — [effect] may be: NoSchedule | PreferNoSchedule | NoExecute
# (do not schedule | avoid scheduling | do not schedule and evict)
# - effect: NoSchedule
#   key: node-role.kubernetes.io/master

---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
#clusterName: kubernetes
etcd:
  local:
    imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
    dataDir: /var/lib/etcd
# kubernetes version — must match the installed kubeadm/kubelet (v1.21.11 here)
kubernetesVersion: v1.21.11
# control-plane endpoint — points at the master
controlPlaneEndpoint: 192.168.0.81:6443
# registry for the control-plane component images
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
# cluster networking
networking:
  dnsDomain: cluster.local
  # pod subnet (CIDR)
  podSubnet: 172.16.0.0/12
  serviceSubnet: 10.96.0.0/16

certificatesDir: /etc/kubernetes/pki

controllerManager: {}
dns: {}
scheduler: {}

kubeadm config migrate --old-config kubeadm-config.yaml --new-config new.yaml

叢集初始化

#如果初始化失敗,重置後再次初始化,命令如下:
kubeadm reset -f ; ipvsadm --clear ; rm -rf ~/.kube
root@node81[17:15:19]:/data/k8s/k8s_init# kubeadm init --config new.yaml --upload-certs
You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.0.81:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:058701363080c97593a6b8fbdfe3039c064e320ed8efddca709ab57d448df296 \
	--control-plane --certificate-key 4762e2ac55d2d04eb67448818bc806380b93055ce45096c8a21ed6319ea71592

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.0.81:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:058701363080c97593a6b8fbdfe3039c064e320ed8efddca709ab57d448df296 

master優化

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 
sudo chown $(id -u):$(id -g) $HOME/.kube/config 
cat <<EOF >> /root/.bashrc
export KUBECONFIG=/etc/kubernetes/admin.conf
EOF
source /root/.bashrc

節點加入叢集

#Token過期後生成新的token:
kubeadm token create --print-join-command


#Master需要生成-certificate-key
root@node81[17:24:25]:~$ kubeadm init phase upload-certs --upload-certs 
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace 
[upload-certs] Using certificate key: 
9b5153fe13fe5a9286eb68fae35311f7357b854a2f8ad925bc7e45b16d2b886e 

#其他master加入叢集

kubeadm join 192.168.0.81:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:058701363080c97593a6b8fbdfe3039c064e320ed8efddca709ab57d448df296 
root@node81[17:25:25]:/data/k8s/k8s_init# kubectl get node -A 
NAME     STATUS     ROLES                  AGE     VERSION
node81   NotReady   control-plane,master   6m52s   v1.21.11
node82   NotReady   <none>                 6m21s   v1.21.11
node83   NotReady   <none>                 6m5s    v1.21.11
root@node81[17:25:32]:/data/k8s/k8s_init# 
root@node81[17:25:32]:/data/k8s/k8s_init# 
root@node81[17:25:32]:/data/k8s/k8s_init# kubectl get pod -A 
NAMESPACE     NAME                             READY   STATUS    RESTARTS   AGE
kube-system   coredns-6f6b8cc4f6-6np74         0/1     Pending   0          6m44s
kube-system   coredns-6f6b8cc4f6-zgj98         0/1     Pending   0          6m44s
kube-system   etcd-node81                      1/1     Running   0          6m49s
kube-system   kube-apiserver-node81            1/1     Running   0          6m49s
kube-system   kube-controller-manager-node81   1/1     Running   0          6m49s
kube-system   kube-proxy-6fpql                 1/1     Running   0          6m14s
kube-system   kube-proxy-l8q5b                 1/1     Running   0          6m44s
kube-system   kube-proxy-xh267                 1/1     Running   0          6m30s
kube-system   kube-scheduler-node81            1/1     Running   0          6m49s

安裝網路外掛(calico 與 flannel 二者擇一安裝即可)

wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml --no-check-certificate
kubectl apply -f calico.yaml

或者

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml

檢查狀態

root@node81[18:11:23]:/data/k8s/k8s_init# kubectl get po -A 
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-7f6768fdfb-qmfqh   1/1     Running   0          118s
kube-system   calico-node-8zghp                          1/1     Running   0          118s
kube-system   calico-node-9x49x                          1/1     Running   0          118s
kube-system   calico-node-czptp                          1/1     Running   0          118s
kube-system   coredns-6f6b8cc4f6-77nfs                   1/1     Running   0          37m
kube-system   coredns-6f6b8cc4f6-zgj98                   1/1     Running   0          52m
kube-system   etcd-node81                                1/1     Running   0          52m
kube-system   kube-apiserver-node81                      1/1     Running   0          52m
kube-system   kube-controller-manager-node81             1/1     Running   0          52m
kube-system   kube-proxy-6fpql                           1/1     Running   0          52m
kube-system   kube-proxy-l8q5b                           1/1     Running   0          52m
kube-system   kube-proxy-xh267                           1/1     Running   0          52m
kube-system   kube-scheduler-node81                      1/1     Running   0          52m
root@node81[18:11:28]:/data/k8s/k8s_init# 
root@node81[18:11:29]:/data/k8s/k8s_init# 
root@node81[18:11:29]:/data/k8s/k8s_init# kubectl get nodes -A 
NAME     STATUS   ROLES                  AGE   VERSION
node81   Ready    control-plane,master   52m   v1.21.11
node82   Ready    <none>                 52m   v1.21.11
node83   Ready    <none>                 52m   v1.21.11

修改ipvs

修改ipvs
kubectl edit cm kube-proxy -n kube-system
mode: 修改為 mode: ipvs

更新Kube-Proxy的Pod:
kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system

驗證
curl 127.0.0.1:10249/proxyMode
ipvs

安裝dashboard

#1. 安裝老版本
cd /root/k8s-ha-install/dashboard/
kubectl create -f .


#2. 安裝最新版:
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.2.0/aio/deploy/recommended.yaml

root@node81[14:26:54]:/data/k8s/k8s_init# vim recommended.yaml
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30000
  selector:
    k8s-app: kubernetes-dashboard

dashboard授權使用賬號密碼登陸

#授權:
vim admin.yaml
#--------------------------admin.yaml--------------------------#
apiVersion: v1 
kind: ServiceAccount 
metadata: 
  name: admin-user 
  namespace: kube-system 
--- 
apiVersion: rbac.authorization.k8s.io/v1 
kind: ClusterRoleBinding 
metadata: 
  name: admin-user 
  annotations: 
    rbac.authorization.kubernetes.io/autoupdate: "true" 
roleRef: 
  apiGroup: rbac.authorization.k8s.io 
  kind: ClusterRole 
  name: cluster-admin 
subjects: 
- kind: ServiceAccount 
  name: admin-user 
  namespace: kube-system
#--------------------------admin.yaml--------------------------#

#執行安裝
kubectl apply -f admin.yaml -n kube-system
獲取token值
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

使用密碼登陸dashboard

echo "admin,admin,1" > /etc/kubernetes/pki/basic_auth_file

增加- --token-auth-file=/etc/kubernetes/pki/basic_auth_file

#vim /etc/kubernetes/manifests/kube-apiserver.yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.0.81:6443
  creationTimestamp: null
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=192.168.0.81
    - --allow-privileged=true
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --etcd-servers=https://127.0.0.1:2379
    - --insecure-port=0
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443
    - --service-account-issuer=https://kubernetes.default.svc.cluster.local
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
    - --service-cluster-ip-range=10.96.0.0/16
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    - --token-auth-file=/etc/kubernetes/pki/basic_auth_file
    image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.21.11

修改後apiserver會重啟

docker ps -a | grep apiserver
netstat -ntlp

建立叢集角色許可權繫結

kubectl create clusterrolebinding login-on-dashboard-with-cluster-admin --clusterrole=cluster-admin --user=admin

檢查許可權繫結

kubectl get clusterrolebinding login-on-dashboard-with-cluster-admin

修改dashboard檔案

增加

,"--token-ttl=21600","--authentication-mode=basic"       

      - --token-ttl=21600
      - --authentication-mode=basic
#kubectl edit deploy kubernetes-dashboard -n kubernetes-dashboard
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "2"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"k8s-app":"kubernetes-dashboard"},"name":"kubernetes-dashboard","namespace":"kubernetes-dashboard"},"spec":{"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"k8s-app":"kubernetes-dashboard"}},"template":{"metadata":{"labels":{"k8s-app":"kubernetes-dashboard"}},"spec":{"containers":[{"args":["--auto-generate-certificates","--namespace=kubernetes-dashboard","--token-ttl=21600","--authentication-mode=basic"],"image":"kubernetesui/dashboard:v2.2.0","imagePullPolicy":"Always","livenessProbe":{"httpGet":{"path":"/","port":8443,"scheme":"HTTPS"},"initialDelaySeconds":30,"timeoutSeconds":30},"name":"kubernetes-dashboard","ports":[{"containerPort":8443,"protocol":"TCP"}],"securityContext":{"allowPrivilegeEscalation":false,"readOnlyRootFilesystem":true,"runAsGroup":2001,"runAsUser":1001},"volumeMounts":[{"mountPath":"/certs","name":"kubernetes-dashboard-certs"},{"mountPath":"/tmp","name":"tmp-volume"}]}],"nodeSelector":{"kubernetes.io/os":"linux"},"serviceAccountName":"kubernetes-dashboard","tolerations":[{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"}],"volumes":[{"name":"kubernetes-dashboard-certs","secret":{"secretName":"kubernetes-dashboard-certs"}},{"emptyDir":{},"name":"tmp-volume"}]}}}}
  creationTimestamp: "2022-04-20T05:26:34Z"
  generation: 2
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  resourceVersion: "239944"
  uid: 494dc713-13df-491b-9663-c1f05bb310fb
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - args:
        - --auto-generate-certificates
        - --namespace=kubernetes-dashboard
        - --token-ttl=21600
        - --authentication-mode=basic
        image: kubernetesui/dashboard:v2.2.0
        imagePullPolicy: Always


等待重啟

kubectl  get pod -n kubernetes-dashboard

瀏覽器訪問kuernetes頁面即可使用密碼登陸

參考:https://www.cnblogs.com/superlinux/p/14676959.html