202. 菜鳥學習k8s安裝1
阿新 • • 發佈:2021-10-29
yum安裝三個學習節點的k8s
1. 修改hosts檔案(all節點) vim /etc/hosts 192.168.56.100 ydzs-master 192.168.56.101 ydzs-node1 192.168.56.102 ydzs-node2 for i in ydzs-node1 ydzs-node2; do scp /etc/hosts $i:/etc/; done 2.關閉防火牆(all節點) [root@localhost ~]# systemctl stop firewalld [root@localhost ~]# systemctl disable firewalld [root@localhost ~]# setenforce 0 [root@localhost ~]# vim /etc/selinux/config # 禁用selinux [root@localhost ~]# cat /etc/selinux/config # This file controls the state of SELinux on the system. # SELINUX= can take one of these three values: # enforcing - SELinux security policy is enforced. # permissive - SELinux prints warnings instead of enforcing. # disabled - No SELinux policy is loaded. SELINUX=disabled # SELINUXTYPE= can take one of three values: # targeted - Targeted processes are protected, # minimum - Modification of targeted policy. Only selected processes are protected. # mls - Multi Level Security protection. SELINUXTYPE=targeted for i in ydzs-node1 ydzs-node2; do scp /etc/selinux/config $i:/etc/selinux/; done 3. 
master 節點做免密登入(master 節點) [root@localhost ~]# ssh-keygen -t rsa for i in ydzs-node1 ydzs-node2;do ssh-copy-id -i .ssh/id_rsa.pub $i;done 4.由於開啟核心 ipv4 轉發需要載入 br_netfilter 模組(all節點) modprobe br_netfilter 5.建立/etc/sysctl.d/k8s.conf檔案(all節點) cat > /etc/sysctl.d/k8s.conf << EOF net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 net.ipv4.ip_forward = 1 EOF 載入一下 sysctl -p /etc/sysctl.d/k8s.conf 6.安裝 ipvs(all節點) cat > /etc/sysconfig/modules/ipvs.modules <<EOF #!/bin/bash modprobe -- ip_vs modprobe -- ip_vs_rr modprobe -- ip_vs_wrr modprobe -- ip_vs_sh modprobe -- nf_conntrack_ipv4 EOF 檢視一下: chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4 7.配置yum源(master 節點) # 其實就是阿里的yum源, 之後傳輸到其他兩個node節點 [root@localhost ~]# cat /etc/yum.repos.d/base.repo [base] name=CentOS-$releasever - Base - mirrors.aliyun.com failovermethod=priority baseurl=http://mirrors.aliyun.com/centos/$releasever/os/$basearch/ http://mirrors.aliyuncs.com/centos/$releasever/os/$basearch/ http://mirrors.cloud.aliyuncs.com/centos/$releasever/os/$basearch/ gpgcheck=1 gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 #released updates [updates] name=CentOS-$releasever - Updates - mirrors.aliyun.com failovermethod=priority baseurl=http://mirrors.aliyun.com/centos/$releasever/updates/$basearch/ http://mirrors.aliyuncs.com/centos/$releasever/updates/$basearch/ http://mirrors.cloud.aliyuncs.com/centos/$releasever/updates/$basearch/ gpgcheck=1 gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 #additional packages that may be useful [extras] name=CentOS-$releasever - Extras - mirrors.aliyun.com failovermethod=priority baseurl=http://mirrors.aliyun.com/centos/$releasever/extras/$basearch/ http://mirrors.aliyuncs.com/centos/$releasever/extras/$basearch/ http://mirrors.cloud.aliyuncs.com/centos/$releasever/extras/$basearch/ gpgcheck=1 
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 #additional packages that extend functionality of existing packages [centosplus] name=CentOS-$releasever - Plus - mirrors.aliyun.com failovermethod=priority baseurl=http://mirrors.aliyun.com/centos/$releasever/centosplus/$basearch/ http://mirrors.aliyuncs.com/centos/$releasever/centosplus/$basearch/ http://mirrors.cloud.aliyuncs.com/centos/$releasever/centosplus/$basearch/ gpgcheck=1 enabled=0 gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 #contrib - packages by Centos Users [contrib] name=CentOS-$releasever - Contrib - mirrors.aliyun.com failovermethod=priority baseurl=http://mirrors.aliyun.com/centos/$releasever/contrib/$basearch/ http://mirrors.aliyuncs.com/centos/$releasever/contrib/$basearch/ http://mirrors.cloud.aliyuncs.com/centos/$releasever/contrib/$basearch/ gpgcheck=1 enabled=0 gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 ## rm -rf /etc/yum.repos.d/* 刪除其他兩個節點的yum檔案 for i in ydzs-node1 ydzs-node2; do scp -r /etc/yum.repos.d/* $i:/etc/yum.repos.d/; done 8.同步時間(all) $ yum install chrony -y $ systemctl enable chronyd $ systemctl start chronyd $ chronyc sources 9.關閉交換分割槽(all) swapoff -a 修改master節點的/etc/sysctl.d/k8s.conf加一行vm.swappiness=0, 傳到其他節點 [root@localhost ~]# vim /etc/sysctl.d/k8s.conf [root@localhost ~]# cat /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 net.ipv4.ip_forward = 1 vm.swappiness=0 for i in ydzs-node1 ydzs-node2; do scp /etc/sysctl.d/k8s.conf $i:/etc/sysctl.d/; done ## 重新載入生效 sysctl -p /etc/sysctl.d/k8s.conf 10.安裝docker(all) yum install -y yum-utils device-mapper-persistent-data lvm2 ## 配置docker倉庫 yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo ## 檢視版本 yum list docker-ce --showduplicates | sort -r ## 統一安裝一個版本 yum install docker-ce-18.09.9 -y 11.配置docker加速鏡像(all) mkdir -p /etc/docker cat > /etc/docker/daemon.json <<EOF { "exec-opts": 
["native.cgroupdriver=systemd"], "registry-mirrors" : [ "https://ot2k4d59.mirror.aliyuncs.com/" ] } EOF [root@localhost ~]# systemctl start docker [root@localhost ~]# systemctl enable docker 12.安裝 Kubeadm(all) cat <<EOF > /etc/yum.repos.d/kubernetes.repo [kubernetes] name=Kubernetes baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64 enabled=1 gpgcheck=0 repo_gpgcheck=0 gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg EOF ## 暫時--disableexcludes 禁掉除了kubernetes之外的別的倉庫 yum install -y kubelet-1.16.2 kubeadm-1.16.2 kubectl-1.16.2 --disableexcludes=kubernetes kubeadm version systemctl enable --now kubelet 13.初始化叢集(master) # 生成預設配置檔案, 修改如下內容 [root@ydzs-master ~]# kubeadm config print init-defaults > kubeadm.yaml ----------------------------分隔符start---------------------------------------- apiVersion: kubeadm.k8s.io/v1beta2 bootstrapTokens: - groups: - system:bootstrappers:kubeadm:default-node-token token: abcdef.0123456789abcdef ttl: 24h0m0s usages: - signing - authentication kind: InitConfiguration localAPIEndpoint: advertiseAddress: 10.151.30.11 # apiserver 節點內網IP bindPort: 6443 nodeRegistration: criSocket: /var/run/dockershim.sock name: ydzs-master # 預設讀取當前master節點的hostname taints: - effect: NoSchedule key: node-role.kubernetes.io/master --- apiServer: timeoutForControlPlane: 4m0s apiVersion: kubeadm.k8s.io/v1beta2 certificatesDir: /etc/kubernetes/pki clusterName: kubernetes controllerManager: {} dns: type: CoreDNS etcd: local: dataDir: /var/lib/etcd imageRepository: registry.aliyuncs.com/google_containers # 修改成阿里雲映象源 kind: ClusterConfiguration kubernetesVersion: v1.16.2 networking: dnsDomain: cluster.local podSubnet: 10.244.0.0/16 # Pod 網段,flannel外掛需要使用這個網段 serviceSubnet: 10.96.0.0/12 scheduler: {} --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration mode: ipvs # kube-proxy 模式 
----------------------------分隔符end---------------------------------------- # 初始化叢集, 複製最後幾行等下要用到 [root@ydzs-master ~]# kubeadm init --config kubeadm.yaml Your Kubernetes control-plane has initialized successfully! To start using your cluster, you need to run the following as a regular user: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/ Then you can join any number of worker nodes by running the following on each as root: kubeadm join 192.168.56.100:6443 --token abcdef.0123456789abcdef \ --discovery-token-ca-cert-hash sha256:05f3d2cefefc8fc5cf372ed7c0bfd220a1fed06dedf9ee5567b0459079cb6307 [root@ydzs-master ~]# mkdir -p $HOME/.kube [root@ydzs-master ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config [root@ydzs-master ~]# chown $(id -u):$(id -g) $HOME/.kube/config # 複製master節點配置檔案到其他節點 for i in ydzs-node1 ydzs-node2; do scp $HOME/.kube/config $i:$HOME/.kube/; done 14.加入叢集(node1,2節點配置) kubeadm join 192.168.56.100:6443 --token abcdef.0123456789abcdef \ --discovery-token-ca-cert-hash sha256:05f3d2cefefc8fc5cf372ed7c0bfd220a1fed06dedf9ee5567b0459079cb6307 [root@ydzs-master ~]# kubectl get nodes NAME STATUS ROLES AGE VERSION ydzs-master NotReady master 8m25s v1.16.2 ydzs-node1 NotReady <none> 16s v1.16.2 # 發現都是NotReady是因為, 還沒配置網路, 節點不能通訊 ydzs-node2 NotReady <none> 12s v1.16.2 15.配置網路(master) wget https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml # 搜尋到名為 kube-flannel-ds-amd64 的 DaemonSet,在kube-flannel容器下面 $ vi kube-flannel.yml ...... 
containers: - name: kube-flannel image: quay.io/coreos/flannel:v0.11.0-amd64 command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr - --iface=eth1 # 指定你的master節點ip的網絡卡就行了, 我的ip在eth1上 ...... # 載入網路外掛 $ kubectl apply -f kube-flannel.yml # 安裝 flannel 網路外掛 # 過一會檢視所有節點都正常了ok [root@ydzs-master ~]# kubectl get nodes NAME STATUS ROLES AGE VERSION ydzs-master Ready master 26m v1.16.2 ydzs-node1 Ready <none> 18m v1.16.2 ydzs-node2 Ready <none> 18m v1.16.2 16.配置Dashboard # 推薦使用下面這種方式 $ wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta5/aio/deploy/recommended.yaml $ vi recommended.yaml # 修改Service為NodePort型別 ...... kind: Service apiVersion: v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboard spec: ports: - port: 443 targetPort: 8443 selector: k8s-app: kubernetes-dashboard type: NodePort # 加上type=NodePort變成NodePort型別的服務 ...... 直接建立: $ kubectl apply -f recommended.yaml [root@ydzs-master ~]# kubectl get pods -n kubernetes-dashboard -l k8s-app=kubernetes-dashboard NAME READY STATUS RESTARTS AGE kubernetes-dashboard-6b86b44f87-rqrvl 1/1 Running 0 93s No resources found in kubernetes-dashboard namespace. [root@ydzs-master ~]# kubectl get svc -n kubernetes-dashboard NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE dashboard-metrics-scraper ClusterIP 10.107.237.138 <none> 8000/TCP 2m23s kubernetes-dashboard NodePort 10.101.215.176 <none> 443:30159/TCP 2m23s # 使用火狐訪問https://master節點ip:30159 $ kubectl apply -f admin.yaml $ kubectl get secret -n kubernetes-dashboard|grep admin-token admin-token-lwmmx kubernetes.io/service-account-token 3 1d $ kubectl get secret admin-token-lwmmx -o jsonpath={.data.token} -n kubernetes-dashboard |base64 -d # 會生成一串很長的base64後的字串 拿到這個token去火狐開啟的頁面輸入 17.清理 如果你的叢集安裝過程中遇到了其他問題,我們可以使用下面的命令來進行重置: $ kubeadm reset $ ifconfig cni0 down && ip link delete cni0 $ ifconfig flannel.1 down && ip link delete flannel.1 $ rm -rf /var/lib/cni/
全部參考: https://www.qikqiak.com/ 優點知識, 陽明老師的部落格