Kubernetes 1.17.1 二進位制高可用叢集搭建
本文出自劉騰飛視訊教程:http://video.jessetalk.cn/
|
|
|
|
|
|
|
|
|
|
|
# 更新centos yum update # 下載 wget 工具 yum install wget # 禁用防火牆 systemctl stop firewalld systemctl disable firewalld # 安裝 epel yum install epel-release
swapoff -a
swapon -s
vi /etc/selinux/config # set SELINUX=disabled SELINUX=disabled # 重啟 reboot
sestatus SELinux status: disabled
hostname 主機名稱修改
#192.168.0.201 hostnamectl set-hostname node00 #192.168.0.202 hostnamectl set-hostname node01 #192.168.0.203 hostnamectl set-hostname node02
# 安裝 yum install chrony # 啟用 systemctl start chronyd systemctl enable chronyd # 設定亞洲時區 timedatectl set-timezone Asia/Shanghai # 啟用NTP同步 timedatectl set-ntp yes
vi /etc/hosts # 新增以下內容 192.168.0.201 node00 192.168.0.202 node01 192.168.0.203 node02
生成的 CA 證書和祕鑰檔案如下:
使用證書的元件如下:
|
|
|
|
|
|
|
|
||
|
|
|
|
|
|
|
|
|
|
|
|
|
||
|
|
|
||
|
|
|
mkdir -p /ssl cd /ssl wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 chmod +x cfssl_linux-amd64 mv cfssl_linux-amd64 /usr/local/bin/cfssl wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 chmod +x cfssljson_linux-amd64 mv cfssljson_linux-amd64 /usr/local/bin/cfssljson wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 chmod +x cfssl-certinfo_linux-amd64 mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo #export PATH=/usr/local/bin:$PATH
mkdir /root/ssl cd /root/ssl cfssl print-defaults config > config.json cfssl print-defaults csr > csr.json # 根據config.json檔案的格式建立如下的ca-config.json檔案 # 過期時間設定成了 87600h cat > ca-config.json <<EOF { "signing": { "default": { "expiry": "87600h" }, "profiles": { "kubernetes": { "usages": [ "signing", "key encipherment", "server auth", "client auth" ], "expiry": "87600h" } } } } EOF
{ "CN": "kubernetes", "hosts": [ "127.0.0.1", "172.21.0.17", "172.21.0.2", "172.21.0.8", "172.21.0.210" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "k8s", "OU": "System" } ], "ca": { "expiry": "87600h" } }
生成 CA 證書和私鑰
cfssl gencert -initca ca-csr.json | cfssljson -bare ca ls ca* ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem
{ "CN": "kubernetes", "hosts": [ "127.0.0.1", "172.21.0.17", "172.21.0.2", "172.21.0.8", "172.21.0.210", "10.254.0.1", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "k8s", "OU": "System" } ] }
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes # 檢視生成的證書 ls kubernetes* kubernetes.csr kubernetes-csr.json kubernetes-key.pem kubernetes.pem
建立kubelet證書
# node00 cat > node00.json <<EOF { "CN": "system:node:node00", "key": { "algo": "rsa", "size": 2048 }, "hosts": [ "node00", "node01", "node02", "172.21.0.17", "172.21.0.2", "172.21.0.8" ], "names": [ { "C": "China", "L": "Shanghai", "O": "system:nodes", "OU": "Kubernetes", "ST": "Shanghai" } ] } EOF # node01 cat > node01.json <<EOF { "CN": "system:node:node01", "key": { "algo": "rsa", "size": 2048 }, "hosts": [ "node00", "node01", "node02", "172.21.0.17", "172.21.0.2", "172.21.0.8" ], "names": [ { "C": "China", "L": "Shanghai", "O": "system:nodes", "OU": "Kubernetes", "ST": "Shanghai" } ] } EOF # node02 cat > node02.json <<EOF { "CN": "system:node:node02", "key": { "algo": "rsa", "size": 2048 }, "hosts": [ "node00", "node01", "node02", "172.21.0.17", "172.21.0.2", "172.21.0.8" ], "names": [ { "C": "China", "L": "Shanghai", "O": "system:nodes", "OU": "Kubernetes", "ST": "Shanghai" } ] } EOF
cfssl gencert \ -ca=ca.pem \ -ca-key=ca-key.pem \ -config=ca-config.json \ -profile=kubernetes \ node00.json | cfssljson -bare node00 cfssl gencert \ -ca=ca.pem \ -ca-key=ca-key.pem \ -config=ca-config.json \ -profile=kubernetes \ node01.json | cfssljson -bare node01 cfssl gencert \ -ca=ca.pem \ -ca-key=ca-key.pem \ -config=ca-config.json \ -profile=kubernetes \ node02.json | cfssljson -bare node02
{ "CN": "admin", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "system:masters", "OU": "System" } ] }
注意
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin # 檢視生成的證書 ls admin* admin.csr admin-csr.json admin-key.pem admin.pem
建立 kube-controller-manager 證書
cat > kube-controller-manager-csr.json <<EOF { "CN": "system:kube-controller-manager", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "O": "system:kube-controller-manager", "OU": "Kubernetes", "ST": "BeiJing" } ] } EOF cfssl gencert \ -ca=ca.pem \ -ca-key=ca-key.pem \ -config=ca-config.json \ -profile=kubernetes \ kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
{ "CN": "system:kube-proxy", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "k8s", "OU": "System" } ] }
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy # 檢視生成的證書 ls kube-proxy* kube-proxy.csr kube-proxy-csr.json kube-proxy-key.pem kube-proxy.pem
cat > kube-scheduler-csr.json <<EOF { "CN": "system:kube-scheduler", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "O": "system:kube-scheduler", "OU": "Kubernetes", "ST": "BeiJing" } ] } EOF cfssl gencert \ -ca=ca.pem \ -ca-key=ca-key.pem \ -config=ca-config.json \ -profile=kubernetes \ kube-scheduler-csr.json | cfssljson -bare kube-scheduler
cat > service-account-csr.json <<EOF { "CN": "service-accounts", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "O": "Kubernetes", "OU": "Kubernetes", "ST": "BeiJing" } ] } EOF cfssl gencert \ -ca=ca.pem \ -ca-key=ca-key.pem \ -config=ca-config.json \ -profile=kubernetes \ service-account-csr.json | cfssljson -bare service-account
cfssl-certinfo -cert kubernetes.pem
mkdir -p /etc/kubernetes/ssl cp *.pem /etc/kubernetes/ssl cd /etc/kubernetes/ssl/ ls admin-key.pem ca-key.pem kube-proxy-key.pem kubernetes-key.pem admin.pem ca.pem kube-proxy.pem kubernetes.pem
scp *.pem [email protected]:/etc/kubernetes/ssl scp *.pem [email protected]:/etc/kubernetes/ssl
# 在3臺節點上建立etcd檔案臨時目錄 mkdir -p /root/etcd cd /root/etcd # 在node00上下載檔案 wget https://github.com/coreos/etcd/releases/download/v3.4.3/etcd-v3.4.3-linux-amd64.tar.gz # 下載完之後複製到 node01和 node 02 scp etcd-v3.4.3-linux-amd64.tar.gz [email protected]:/root/etcd scp etcd-v3.4.3-linux-amd64.tar.gz [email protected]:/root/etcd # 在node00, node01, node02的 /root/etcd目錄下執行 tar -xvf etcd-v3.4.3-linux-amd64.tar.gz mv etcd-v3.4.3-linux-amd64/etcd* /usr/local/bin
etcd --version etcd Version: 3.4.3 Git SHA: 3c8740a79 Go Version: go1.12.9 Go OS/Arch: linux/amd64
mkdir -p /var/lib/etcd
[Unit] Description=Etcd Server After=network.target After=network-online.target Wants=network-online.target Documentation=https://github.com/coreos [Service] Type=notify WorkingDirectory=/var/lib/etcd/ Restart=always RestartSec=5s LimitNOFILE=40000 TimeoutStartSec=0 ExecStart=/usr/local/bin/etcd \ --name infra1 \ --data-dir /var/lib/etcd \ --initial-advertise-peer-urls https://172.21.0.17:2380 \ --listen-peer-urls https://172.21.0.17:2380 \ --listen-client-urls https://172.21.0.17:2379 \ --advertise-client-urls https://172.21.0.17:2379 \ --initial-cluster-token etcd-cluster \ --initial-cluster infra1=https://172.21.0.17:2380,infra2=https://172.21.0.2:2380,infra3=https://172.21.0.8:2380 \ --initial-cluster-state new \ --client-cert-auth \ --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \ --cert-file=/etc/kubernetes/ssl/kubernetes.pem \ --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \ --peer-client-cert-auth \ --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \ --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \ --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem [Install] WantedBy=multi-user.target
[Unit] Description=Etcd Server After=network.target After=network-online.target Wants=network-online.target Documentation=https://github.com/coreos [Service] Type=notify WorkingDirectory=/var/lib/etcd/ Restart=always RestartSec=5s LimitNOFILE=40000 TimeoutStartSec=0 ExecStart=/usr/local/bin/etcd \ --name infra2 \ --data-dir /var/lib/etcd \ --initial-advertise-peer-urls https://172.21.0.2:2380 \ --listen-peer-urls https://172.21.0.2:2380 \ --listen-client-urls https://172.21.0.2:2379 \ --advertise-client-urls https://172.21.0.2:2379 \ --initial-cluster-token etcd-cluster \ --initial-cluster infra1=https://172.21.0.17:2380,infra2=https://172.21.0.2:2380,infra3=https://172.21.0.8:2380 \ --initial-cluster-state new \ --client-cert-auth \ --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \ --cert-file=/etc/kubernetes/ssl/kubernetes.pem \ --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \ --peer-client-cert-auth \ --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \ --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \ --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem [Install] WantedBy=multi-user.target
[Unit] Description=Etcd Server After=network.target After=network-online.target Wants=network-online.target Documentation=https://github.com/coreos [Service] Type=notify WorkingDirectory=/var/lib/etcd/ Restart=always RestartSec=5s LimitNOFILE=40000 TimeoutStartSec=0 ExecStart=/usr/local/bin/etcd \ --name infra3 \ --data-dir /var/lib/etcd \ --initial-advertise-peer-urls https://172.21.0.8:2380 \ --listen-peer-urls https://172.21.0.8:2380 \ --listen-client-urls https://172.21.0.8:2379 \ --advertise-client-urls https://172.21.0.8:2379 \ --initial-cluster-token etcd-cluster \ --initial-cluster infra1=https://172.21.0.17:2380,infra2=https://172.21.0.2:2380,infra3=https://172.21.0.8:2380 \ --initial-cluster-state new \ --client-cert-auth \ --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \ --cert-file=/etc/kubernetes/ssl/kubernetes.pem \ --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \ --peer-client-cert-auth \ --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \ --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \ --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem [Install] WantedBy=multi-user.target
重要引數解釋
name |
本member名稱 |
data-dir |
指定節點的資料儲存目錄,這些資料包括節點ID,叢集ID,叢集初始化配置,Snapshot檔案,若未指定-wal-dir,還會儲存WAL檔案;如果不指定會用預設目錄。 |
initial-advertise-peer-urls |
其他member使用,其他member通過該地址與本member互動資訊。一定要保證從其他member能可訪問該地址。靜態配置方式下,該引數的value一定要同時在--initial-cluster引數中存在。 memberID的生成受--initial-cluster-token和--initial-advertise-peer-urls影響。 |
listen-peer-urls |
本member側使用,用於監聽其他member傳送資訊的地址。ip為全0代表監聽本member側所有介面 |
listen-client-urls |
本member側使用,用於監聽etcd客戶傳送資訊的地址。ip為全0代表監聽本member側所有介面 |
advertise-client-urls |
etcd客戶使用,客戶通過該地址與本member互動資訊。一定要保證從客戶側能可訪問該地址 |
client-cert-auth |
啟用客戶證書認證 |
trusted-ca-file |
客戶端認證CA檔案 |
cert-file |
客戶端認證公鑰 |
key-file |
客戶端認證私鑰 |
peer-client-cert-auth |
啟用member成員之間證書認證 |
peer-trusted-ca-file |
成員之間證書認證CA檔案 |
peer-cert-file |
成員之間證書認證公鑰 |
peer-key-file |
成員之間證書認證私鑰 |
initial-cluster-token |
用於區分不同叢集。本地如有多個叢集要設為不同 |
initial-cluster |
本member側使用。描述叢集中所有節點的資訊,本member根據此資訊去聯絡其他member。 memberID的生成受--initial-cluster-token和--initial-advertise-peer-urls影響。 |
initial-cluster-state |
用於指示本次是否為新建叢集。有兩個取值new和existing。如果填為existing,則該member啟動時會嘗試與其他member互動。 叢集初次建立時,要填為new,經嘗試最後一個節點填existing也正常,其他節點不能填為existing。 叢集執行過程中,一個member故障後恢復時填為existing,經嘗試填為new也正常。 |
mv etcd.service /usr/lib/systemd/system/ systemctl daemon-reload systemctl enable etcd systemctl start etcd systemctl status etcd
ETCDCTL_API=3 etcdctl --cert=/etc/kubernetes/ssl/kubernetes.pem --key /etc/kubernetes/ssl/kubernetes-key.pem --insecure-skip-tls-verify=true --endpoints=https://192.168.0.201:2379,https://192.168.0.202:2379,https://192.168.0.203:2379 endpoint health https://192.168.0.201:2379 is healthy: successfully committed proposal: took = 13.87734ms https://192.168.0.202:2379 is healthy: successfully committed proposal: took = 16.08662ms https://192.168.0.203:2379 is healthy: successfully committed proposal: took = 15.656404ms
# 建立統一檔案存放目錄 mkdir /kube cd /kube # 下載 kube-apiserver 元件 wget https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/linux/amd64/kube-apiserver # 下載 kube-scheduler元件 wget https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/linux/amd64/kube-scheduler # 下載 kube-controller-manager元件 wget https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/linux/amd64/kube-controller-manager
Token auth file
head -c 16 /dev/urandom | od -An -t x | tr -d ' ' 7dc36cb645fbb422aeb328320673bbe0
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ') cat > token.csv <<EOF ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap" EOF
BOOTSTRAP_TOKEN
cp token.csv /etc/kubernetes/ scp token.csv [email protected]:/etc/kubernetes scp token.csv [email protected]:/etc/kubernetes
mv ~/kube/kube-apiserver /usr/local/bin cd /usr/local/bin chmod 755 kube-apiserver
[Unit] Description=Kubernetes API Service Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=network.target After=etcd.service [Service] ExecStart=/usr/local/bin/kube-apiserver \ --advertise-address=172.21.0.17 \ --allow-privileged=true \ --audit-log-maxage=30 \ --audit-log-maxbackup=3 \ --audit-log-maxsize=100 \ --audit-log-path=/var/log/audit.log \ --authorization-mode=Node,RBAC \ --bind-address=0.0.0.0 \ --client-ca-file=/etc/kubernetes/ssl/ca.pem \ --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \ --enable-swagger-ui=true \ --etcd-cafile=/etc/kubernetes/ssl/ca.pem \ --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \ --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \ --etcd-servers=https://172.21.0.17:2379,https://172.21.0.2:2379,https://172.21.0.8:2379 \ --event-ttl=1h \ --insecure-bind-address=127.0.0.1 \ --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem \ --kubelet-client-certificate=/etc/kubernetes/ssl/node00.pem \ --kubelet-client-key=/etc/kubernetes/ssl/node00-key.pem \ --kubelet-https=true \ --service-account-key-file=/etc/kubernetes/ssl/service-account.pem \ --service-cluster-ip-range=10.254.0.0/16 \ --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \ --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \ --v=2 Restart=always Type=notify LimitNOFILE=65536 [Install] WantedBy=multi-user.target
[Unit] Description=Kubernetes API Service Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=network.target After=etcd.service [Service] ExecStart=/usr/local/bin/kube-apiserver \ --advertise-address=172.21.0.2 \ --allow-privileged=true \ --audit-log-maxage=30 \ --audit-log-maxbackup=3 \ --audit-log-maxsize=100 \ --audit-log-path=/var/log/audit.log \ --authorization-mode=Node,RBAC \ --bind-address=0.0.0.0 \ --client-ca-file=/etc/kubernetes/ssl/ca.pem \ --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \ --enable-swagger-ui=true \ --etcd-cafile=/etc/kubernetes/ssl/ca.pem \ --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \ --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \ --etcd-servers=https://172.21.0.17:2379,https://172.21.0.2:2379,https://172.21.0.8:2379 \ --event-ttl=1h \ --insecure-bind-address=127.0.0.1 \ --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem \ --kubelet-client-certificate=/etc/kubernetes/ssl/node01.pem \ --kubelet-client-key=/etc/kubernetes/ssl/node01-key.pem \ --kubelet-https=true \ --service-account-key-file=/etc/kubernetes/ssl/service-account.pem \ --service-cluster-ip-range=10.254.0.0/16 \ --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \ --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \ --v=2 Restart=always Type=notify LimitNOFILE=65536 [Install] WantedBy=multi-user.target
[Unit] Description=Kubernetes API Service Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=network.target After=etcd.service [Service] ExecStart=/usr/local/bin/kube-apiserver \ --advertise-address=172.21.0.8 \ --allow-privileged=true \ --audit-log-maxage=30 \ --audit-log-maxbackup=3 \ --audit-log-maxsize=100 \ --audit-log-path=/var/log/audit.log \ --authorization-mode=Node,RBAC \ --bind-address=0.0.0.0 \ --client-ca-file=/etc/kubernetes/ssl/ca.pem \ --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \ --enable-swagger-ui=true \ --etcd-cafile=/etc/kubernetes/ssl/ca.pem \ --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \ --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \ --etcd-servers=https://172.21.0.17:2379,https://172.21.0.2:2379,https://172.21.0.8:2379 \ --event-ttl=1h \ --insecure-bind-address=127.0.0.1 \ --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem \ --kubelet-client-certificate=/etc/kubernetes/ssl/node02.pem \ --kubelet-client-key=/etc/kubernetes/ssl/node02-key.pem \ --kubelet-https=true \ --service-account-key-file=/etc/kubernetes/ssl/service-account.pem \ --service-cluster-ip-range=10.254.0.0/16 \ --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \ --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \ --v=2 Restart=always Type=notify LimitNOFILE=65536 [Install] WantedBy=multi-user.target
systemctl daemon-reload systemctl enable kube-apiserver systemctl start kube-apiserver systemctl status kube-apiserver
重要引數解釋
https://blog.csdn.net/zhonglinzhang/article/details/90697495
advertise-address |
向叢集成員釋出apiserver的IP地址,該地址必須能夠被叢集的成員訪問。如果為空,則使用--bind-address,如果--bind-address未指定,那麼使用主機的預設介面。 |
authorization-mode |
在安全埠上執行授權的有序的外掛列表。預設值:AlwaysAllow 以逗號分隔的列表:AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node. |
allow-privileged |
true允許特權模式的容器。預設值false |
audit-log-maxage |
|
audit-log-maxbackup |
|
audit-log-maxsize |
|
audit-log-path |
|
bind-address |
監聽安全埠的IP地址。必須能被叢集的其他以及CLI/web客戶機訪問 |
tls-cert-file |
包含HTTPS的預設x509證書的檔案。 CA證書,如果有的話,在伺服器證書之後連線。如果啟用了HTTPS服務,但未提供 --tls-cert-file和--tls-private-key-file,則會為公共地址生成自簽名證書和金鑰,並將其儲存到--cert-dir指定的目錄中。 |
tls-private-key-file |
包含和--tls-cert-file配對的預設x509私鑰的檔案 |
insecure-bind-address |
地址繫結到不安全服務埠,(default 127.0.0.1),將來會被remove |
client-ca-file |
啟用客戶端證書認證。該引數引用的檔案中必須包含一個或多個證書頒發機構,用於驗證提交給該元件的客戶端證書。如果客戶端證書已驗證,則用其中的 Common Name 作為請求的使用者名稱 |
enable-admission |
|
enable-swagger-ui |
啟用swagger ui |
etcd-cafile |
保護etcd通訊的SSL證書頒發機構檔案 |
etcd-certfile |
用於保護etcd通訊的SSL證書檔案 |
etcd-keyfile |
用來保護etcd通訊的SSL key檔案 |
etcd-servers |
etcd伺服器列表(格式://ip:port),逗號分隔 |
event-ttl |
保留事件的時間。預設值 1h0m0s |
kubelet-certificate-authority |
|
kubelet-client-certificate |
|
kubelet-client-key |
|
kubelet-https |
kubelet通訊使用https,預設值 true |
service-account-key-file |
包含PEM編碼的x509 RSA或ECDSA私有或者公共金鑰的檔案。用於驗證service account token。指定的檔案可以包含多個值。引數可以被指定多個不同的檔案。如未指定,--tls-private-key-file將被使用。如果提供了--service-account-signing-key,則必須指定該引數 |
service-cluster-ip-range |
CIDR表示IP範圍,用於分配服務叢集IP。不能與分配給pod節點的IP重疊 (default 10.0.0.0/24) |
v |
cd ~/kube wget https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/linux/amd64/kubectl mv kubectl /usr/local/bin chmod 755 /usr/local/bin/kubectl
kubectl config set-cluster kubernetes-training \ --certificate-authority=/etc/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=https://127.0.0.1:6443 \ --kubeconfig=admin.config kubectl config set-credentials admin \ --client-certificate=/etc/kubernetes/ssl/admin.pem \ --client-key=/etc/kubernetes/ssl/admin-key.pem \ --embed-certs=true \ --kubeconfig=admin.config kubectl config set-context default \ --cluster=kubernetes-training \ --user=admin \ --kubeconfig=admin.config kubectl config use-context default --kubeconfig=admin.config
注意:
kubectl get ns NAME STATUS AGE default Active 4h31m kube-node-lease Active 4h32m kube-public Active 4h32m kube-system Active 4h32m
mv ~/kube/kube-controller-manager /usr/local/bin cd /usr/local/bin chmod 755 kube-controller-manager
[Unit] Description=Kubernetes Controller Manager Documentation=https://github.com/GoogleCloudPlatform/kubernetes [Service] ExecStart=/usr/local/bin/kube-controller-manager \ --address=0.0.0.0 \ --allocate-node-cidrs=true \ --cluster-cidr=10.244.0.0/16 \ --cluster-name=kubernetes \ --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \ --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \ --kubeconfig=/etc/kubernetes/kube-controller-manager.config \ --leader-elect=true \ --root-ca-file=/etc/kubernetes/ssl/ca.pem \ --service-account-private-key-file=/etc/kubernetes/ssl/service-account-key.pem \ --service-cluster-ip-range=10.254.0.0/16 \ --use-service-account-credentials=true \ --v=2 Restart=always LimitNOFILE=65536 [Install] WantedBy=multi-user.target
kubectl config set-cluster kubernetes-training \ --certificate-authority=/etc/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=https://127.0.0.1:6443 \ --kubeconfig=kube-controller-manager.config kubectl config set-credentials system:kube-controller-manager \ --client-certificate=/etc/kubernetes/ssl/kube-controller-manager.pem \ --client-key=/etc/kubernetes/ssl/kube-controller-manager-key.pem \ --embed-certs=true \ --kubeconfig=kube-controller-manager.config kubectl config set-context default \ --cluster=kubernetes-training \ --user=system:kube-controller-manager \ --kubeconfig=kube-controller-manager.config kubectl config use-context default --kubeconfig=kube-controller-manager.config
scp kube-controller-manager.config [email protected]:/etc/kubernetes/ scp kube-controller-manager.config [email protected]:/etc/kubernetes/
啟動
systemctl daemon-reload systemctl enable kube-controller-manager systemctl start kube-controller-manager systemctl status kube-controller-manager kubectl get componentstatus
重要引數說明
https://www.jianshu.com/p/bdb153daba21
address |
|
allocate-node-cidrs |
|
cluster-cidr |
|
cluster-name |
|
cluster-signing-cert-file |
一個PEM編碼的有X509 CA證書的檔案,用於在叢集內釋出證書 |
cluster-signing-key-file |
一個PEM編碼的有RSA或ECDSA私鑰的檔案,用於對叢集內的證書進行簽名 |
kubeconfig |
|
leader-elect |
|
root-ca-file |
|
service-account-private-key-file |
用於簽署 service account tokens 的 PEM 編碼的RSA或ECDSA金鑰檔案 |
service-cluster-ip-range |
叢集中服務的CIDR範圍。 要求--allocate-node-cidrs為true |
use-service-account-credentials |
|
v |
mv ~/kube/kube-scheduler /usr/local/bin cd /usr/local/bin chmod 755 kube-scheduler
kubectl config set-cluster kubernetes-training \ --certificate-authority=/etc/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=https://127.0.0.1:6443 \ --kubeconfig=kube-scheduler.config kubectl config set-credentials system:kube-scheduler \ --client-certificate=/etc/kubernetes/ssl/kube-scheduler.pem \ --client-key=/etc/kubernetes/ssl/kube-scheduler-key.pem \ --embed-certs=true \ --kubeconfig=kube-scheduler.config kubectl config set-context default \ --cluster=kubernetes-training \ --user=system:kube-scheduler \ --kubeconfig=kube-scheduler.config kubectl config use-context default --kubeconfig=kube-scheduler.config
vi /etc/kubernetes/config/kube-scheduler.yaml
apiVersion: kubescheduler.config.k8s.io/v1alpha1 kind: KubeSchedulerConfiguration clientConnection: kubeconfig: "/etc/kubernetes/kube-scheduler.config" leaderElection: leaderElect: true
[Unit] Description=Kubernetes Scheduler Documentation=https://github.com/kubernetes/kubernetes [Service] ExecStart=/usr/local/bin/kube-scheduler \ --config=/etc/kubernetes/config/kube-scheduler.yaml \ --v=2 Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target
sudo systemctl daemon-reload sudo systemctl enable kube-scheduler sudo systemctl start kube-scheduler
sudo yum install -y socat conntrack ipset sudo yum install -y yum-utils device-mapper-persistent-data lvm2 sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo sudo yum install -y docker-ce docker-ce-cli containerd.io sudo systemctl enable docker sudo systemctl start docker
cd ~/kube wget --timestamping \ https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz \ https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/linux/amd64/kube-proxy \ https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/linux/amd64/kubelet
cd ~/kube chmod +x kube-proxy kubelet sudo mv kube-proxy kubelet /usr/local/bin/ mkdir -p /opt/cni/bin tar -xvf cni-plugins-linux-amd64-v0.8.5.tgz --directory /opt/cni/bin/ scp cni-plugins-linux-amd64-v0.8.5.tgz [email protected]:/root/kube cd ~/kube mkdir -p /opt/cni/bin tar -xvf cni-plugins-linux-amd64-v0.8.5.tgz --directory /opt/cni/bin scp cni-plugins-linux-amd64-v0.8.5.tgz [email protected]:/root/kube cd ~/kube mkdir -p /opt/cni/bin tar -xvf cni-plugins-linux-amd64-v0.8.5.tgz --directory /opt/cni/bin --------------------------------------------------------- /etc/kubernetes 目錄下執行 # node00 kubectl config set-cluster kubernetes-training \ --certificate-authority=/etc/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=https://127.0.0.1:6443 \ --kubeconfig=kubelet.config kubectl config set-credentials system:node:node00 \ --client-certificate=/etc/kubernetes/ssl/node00.pem \ --client-key=/etc/kubernetes/ssl/node00-key.pem \ --embed-certs=true \ --kubeconfig=kubelet.config kubectl config set-context default \ --cluster=kubernetes-training \ --user=system:node:node00 \ --kubeconfig=kubelet.config kubectl config use-context default --kubeconfig=kubelet.config # node01 kubectl config set-cluster kubernetes-training \ --certificate-authority=/etc/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=https://127.0.0.1:6443 \ --kubeconfig=kubelet.config kubectl config set-credentials system:node:node01 \ --client-certificate=/etc/kubernetes/ssl/node01.pem \ --client-key=/etc/kubernetes/ssl/node01-key.pem \ --embed-certs=true \ --kubeconfig=kubelet.config kubectl config set-context default \ --cluster=kubernetes-training \ --user=system:node:node01 \ --kubeconfig=kubelet.config kubectl config use-context default --kubeconfig=kubelet.config # node02 kubectl config set-cluster kubernetes-training \ --certificate-authority=/etc/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=https://127.0.0.1:6443 \ --kubeconfig=kubelet.config kubectl config set-credentials 
system:node:node02 \ --client-certificate=/etc/kubernetes/ssl/node02.pem \ --client-key=/etc/kubernetes/ssl/node02-key.pem \ --embed-certs=true \ --kubeconfig=kubelet.config kubectl config set-context default \ --cluster=kubernetes-training \ --user=system:node:node02 \ --kubeconfig=kubelet.config kubectl config use-context default --kubeconfig=kubelet.config
kind: KubeletConfiguration apiVersion: kubelet.config.k8s.io/v1beta1 authentication: anonymous: enabled: false webhook: enabled: true x509: clientCAFile: "/etc/kubernetes/ssl/ca.pem" authorization: mode: Webhook clusterDomain: "cluster.local" clusterDNS: - "10.254.0.10" runtimeRequestTimeout: "15m" tlsCertFile: "/etc/kubernetes/ssl/node00.pem" tlsPrivateKeyFile: "/etc/kubernetes/ssl/node00-key.pem"
[Unit] Description=Kubernetes Kubelet Documentation=https://github.com/kubernetes/kubernetes After=docker.service Requires=docker.service [Service] ExecStart=/usr/local/bin/kubelet \ --config=/etc/kubernetes/config/kubelet.yaml \ --image-pull-progress-deadline=2m \ --kubeconfig=/etc/kubernetes/kubelet.config \ --pod-infra-container-image=cargo.caicloud.io/caicloud/pause-amd64:3.1 \ --network-plugin=cni \ --register-node=true \ --cni-conf-dir=/etc/cni/net.d \ --cni-bin-dir=/opt/cni/bin \ --v=2 Restart=always RestartSec=5 [Install] WantedBy=multi-user.target
重要引數解釋
config |
|
image-pull-progress-deadline |
|
kubeconfig |
|
pod-infra-container-image |
|
network-plugin |
|
register-node |
|
cni-conf-dir |
|
cni-bin-dir |
|
v |
kubectl config set-cluster kubernetes-training \ --certificate-authority=/etc/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=https://127.0.0.1:6443 \ --kubeconfig=kube-proxy.config kubectl config set-credentials system:kube-proxy \ --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \ --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \ --embed-certs=true \ --kubeconfig=kube-proxy.config kubectl config set-context default \ --cluster=kubernetes-training \ --user=system:kube-proxy \ --kubeconfig=kube-proxy.config kubectl config use-context default --kubeconfig=kube-proxy.config
kind: KubeProxyConfiguration apiVersion: kubeproxy.config.k8s.io/v1alpha1 clientConnection: kubeconfig: "/etc/kubernetes/kube-proxy.config" mode: "iptables" clusterCIDR: "10.244.0.0/16"
[Unit] Description=Kubernetes Kube Proxy Documentation=https://github.com/kubernetes/kubernetes [Service] ExecStart=/usr/local/bin/kube-proxy \ --config=/etc/kubernetes/config/kube-proxy-config.yaml Restart=always RestartSec=5 [Install] WantedBy=multi-user.target
sudo systemctl daemon-reload sudo systemctl enable kubelet kube-proxy sudo systemctl start kubelet kube-proxy
cat <<EOF | kubectl apply -f - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: annotations: rbac.authorization.kubernetes.io/autoupdate: "true" labels: kubernetes.io/bootstrapping: rbac-defaults name: system:kube-apiserver-to-kubelet rules: - apiGroups: - "" resources: - nodes/proxy - nodes/stats - nodes/log - nodes/spec - nodes/metrics verbs: - "*" EOF
cat <<EOF | kubectl apply -f - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: name: system:kube-apiserver namespace: "" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:kube-apiserver-to-kubelet subjects: - apiGroup: rbac.authorization.k8s.io kind: Group name: system:nodes EOF
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
https://shimo.im/docs/VWdqDhDg3wWJWqcQ/ 「kube-flannel.yml」,可複製連結後用石墨文件 App 或小程式開啟
kubectl apply -f https://raw.githubusercontent.com/caicloud/kube-ladder/master/tutorials/resources/coredns.yaml
kubectl run busybox --image=busybox:1.28.3 --command -- sleep 3600
kubectl get pods -l run=busybox
NAME READY STATUS RESTARTS AGE busybox-d967695b6-29hfh 1/1 Running 0 61s
POD_NAME=$(kubectl get pods -l run=busybox -o jsonpath="{.items[0].metadata.name}")
kubectl exec -ti $POD_NAME -- nslookup kubernetes
Server: 10.254.0.10 Address 1: 10.254.0.10 kube-dns.kube-system.svc.cluster.local Name: kubernetes Address 1: 10.254.0.1 kubernetes.default.svc.cluster.local
yum install haproxy yum install keepalived
cat >> /etc/sysctl.conf << EOF net.ipv4.ip_forward = 1 EOF
vrrp_script haproxy-check { script "killall -0 haproxy" interval 2 weight -2 fall 10 rise 2 } vrrp_instance haproxy-vip { state MASTER priority 250 interface ens33 virtual_router_id 47 advert_int 3 unicast_src_ip 192.168.0.201 unicast_peer { 192.168.0.202 192.168.0.203 } virtual_ipaddress { 192.168.0.210 } track_script { haproxy-check } }
cat >> /etc/sysctl.conf << EOF net.ipv4.ip_nonlocal_bind = 1 EOF
frontend k8s-api bind *:8443 #ingre 443 衝突 mode tcp option tcplog default_backend k8s-api backend k8s-api mode tcp option tcplog option tcp-check balance roundrobin default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100 server k8s-api-1 192.168.0.201:6443 check server k8s-api-2 192.168.0.202:6443 check server k8s-api-3 192.168.0.203:6443 check
systemctl enable keepalived haproxy systemctl restart keepalived haproxy
systemctl restart kube-controller-manager kube-scheduler kubelet kube-proxy