K8s test cluster automated installation shell script
Published: 2019-01-09
#!/bin/bash

function f_tls(){
# Install cfssl directly from the binary release packages
ls -l /usr/local/bin/cfssl* > /dev/null 2>&1
if [ $? != 0 ];then
    wget http://172.23.210.21:83/software/linux/docker/cfssl_linux-amd64
    chmod +x cfssl_linux-amd64
    mv cfssl_linux-amd64 /usr/local/bin/cfssl
    wget http://172.23.210.21:83/software/linux/docker/cfssljson_linux-amd64
    chmod +x cfssljson_linux-amd64
    mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
    wget http://172.23.210.21:83/software/linux/docker/cfssl-certinfo_linux-amd64
    chmod +x cfssl-certinfo_linux-amd64
    mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
    export PATH=/usr/local/bin:$PATH

    # Create the CA configuration file
    mkdir -p /root/ssl
    cd /root/ssl
    cfssl print-defaults config > config.json
    cfssl print-defaults csr > csr.json
    # Create ca-config.json below, following the format of config.json
    # The certificate expiry is set to 8760h (one year)
    cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "8760h"
      }
    }
  }
}
EOF

    # Create the CA certificate signing request
    cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "WuHan",
      "L": "WuHan",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

    # Generate the CA certificate and private key
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca
    ls ca*
    sleep 3

    # Create the kubernetes certificate signing request
    cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "172.23.210.30",
    "172.23.210.31",
    "172.23.210.32",
    "172.23.210.33",
    "10.23.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "WuHan",
      "L": "WuHan",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

    # Generate the kubernetes certificate and private key
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
    ls kubernetes*
    sleep 3

    # Create the admin certificate signing request
    cat > admin-csr.json << EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "WuHan",
      "L": "WuHan",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

    # Generate the admin certificate and private key
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
    ls admin*
    sleep 3

    # Create the kube-proxy certificate signing request
    cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "WuHan",
      "L": "WuHan",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

    # Generate the kube-proxy client certificate and private key
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
    ls kube-proxy*
    sleep 3

    # Distribute the certificates: copy the generated certificate and key files (*.pem)
    # to /etc/kubernetes/ssl on every machine for later use
    mkdir -p /etc/kubernetes/ssl
    cp *.pem /etc/kubernetes/ssl
else
    echo "cfssl is already installed"
fi
} #end f_tls
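# Optional helper, not wired into the option handling at the end of the script:
# a minimal sketch for sanity-checking the certificates produced by f_tls.
# It assumes openssl and cfssl-certinfo are on PATH and that the .pem files
# have already been copied to /etc/kubernetes/ssl; call it manually if needed.
function f_check_certs(){
    for cert in ca kubernetes admin kube-proxy;do
        echo "== ${cert}.pem =="
        # Print subject, issuer and validity period of each certificate
        openssl x509 -in /etc/kubernetes/ssl/${cert}.pem -noout -subject -issuer -dates
    done
    # Show the SANs baked into the apiserver/etcd certificate
    cfssl-certinfo -cert /etc/kubernetes/ssl/kubernetes.pem | grep -A 15 '"sans"'
} #end f_check_certs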
function f_etcd(){
#ls -l /usr/local/bin/etcd* >/dev/null
#if [ $? != 0 ];then
# Download the etcd binary release
cd
if [ ! -f etcd-v3.3.7-linux-amd64.tar.gz ];then
    wget http://172.23.210.21:83/software/linux/docker/etcd-v3.3.7-linux-amd64.tar.gz
    tar -zxvf etcd-v3.3.7-linux-amd64.tar.gz
    mv etcd-v3.3.7-linux-amd64/etcd* /usr/local/bin/
    mkdir -p /var/lib/etcd

    # Create the systemd unit file for etcd
    cat > /lib/systemd/system/etcd.service << 'EOF'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --name=chenzg \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls=https://172.23.210.30:2380 \
  --listen-peer-urls=https://172.23.210.30:2380 \
  --listen-client-urls=https://172.23.210.30:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://172.23.210.30:2379 \
  --initial-cluster-token=etcd-cluster \
  --initial-cluster=chenzg=https://172.23.210.30:2380 \
  --initial-cluster-state=new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

    ## Environment variable configuration file (kept for reference, not used)
    #cat > /etc/etcd/etcd.conf << EOF
    ## [member]
    #ETCD_NAME=kubernetes
    #ETCD_DATA_DIR="/var/lib/etcd"
    #ETCD_LISTEN_PEER_URLS="https://172.18.224.216:2380"
    #ETCD_LISTEN_CLIENT_URLS="https://172.18.224.216:2379"
    #[cluster]
    #ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.18.224.216:2380"
    #ETCD_INITIAL_CLUSTER_TOKEN="kubernetes"
    #ETCD_ADVERTISE_CLIENT_URLS="https://172.18.224.216:2379"
    #EOF

    # Start the etcd service
    systemctl daemon-reload
    systemctl enable etcd
    systemctl restart etcd
else
    echo "etcd is already installed"
fi
} #end f_etcd
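# Optional helper: a small sketch for verifying that etcd came up correctly.
# etcdctl from etcd v3.3.x defaults to the v2 API, so the same v2-style flags
# used by f_flanneld below (--endpoints, --ca-file) are reused here; adjust if
# you export ETCDCTL_API=3.
function f_check_etcd(){
    systemctl status etcd --no-pager | head -n 5
    etcdctl --endpoints=https://172.23.210.30:2379 \
            --ca-file=/etc/kubernetes/ssl/ca.pem cluster-health
    etcdctl --endpoints=https://172.23.210.30:2379 \
            --ca-file=/etc/kubernetes/ssl/ca.pem member list
} #end f_check_etcd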
function f_flanneld(){
cd
if [ ! -f flannel-v0.10.0-linux-amd64.tar.gz ];then
    wget http://172.23.210.21:83/software/linux/docker/flannel-v0.10.0-linux-amd64.tar.gz
    cd
    mkdir flannel
    tar -zxvf flannel-v0.10.0-linux-amd64.tar.gz -C flannel
    cp flannel/{flanneld,mk-docker-opts.sh} /usr/local/bin

    # Write the pod network configuration into etcd (v2 API)
    etcdctl --endpoints=https://172.23.210.30:2379 --ca-file=/etc/kubernetes/ssl/ca.pem mkdir /kubernetes/network
    etcdctl --endpoints=https://172.23.210.30:2379 --ca-file=/etc/kubernetes/ssl/ca.pem mk /kubernetes/network/config '{"Network":"10.30.0.0/16","SubnetLen":24,"Backend":{"Type":"vxlan"}}'

    # Create the systemd unit file for flanneld
    cat > /lib/systemd/system/flanneld.service << 'EOF'
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/usr/local/bin/flanneld \
  -etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  -etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
  -etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
  -etcd-endpoints=https://172.23.210.30:2379 \
  -etcd-prefix=/kubernetes/network
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

    systemctl daemon-reload
    systemctl enable flanneld
    systemctl start flanneld
else
    echo "flanneld is already installed"
fi
} #end f_flanneld
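# Optional helper: after flanneld starts, mk-docker-opts.sh writes the Docker
# network options to /run/flannel/docker and flanneld registers its lease under
# /kubernetes/network/subnets in etcd. This sketch prints both (and, assuming
# the vxlan backend creates a flannel.1 interface, its address) so the overlay
# network can be confirmed before starting docker/kubelet on the nodes.
function f_check_flannel(){
    cat /run/flannel/docker
    etcdctl --endpoints=https://172.23.210.30:2379 \
            --ca-file=/etc/kubernetes/ssl/ca.pem ls /kubernetes/network/subnets
    ip addr show flannel.1 | grep inet
} #end f_check_flannel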
function f_k8s(){
ls -l /usr/local/bin/kube* > /dev/null 2>&1
if [ $? != 0 ];then
    # Deploy the kubectl tool and create the kubeconfig files
    cd
    if [ ! -f kubernetes-server-linux-amd64.tar.gz ];then
        wget http://172.23.210.21:83/software/linux/docker/kubernetes-server-linux-amd64.tar.gz
    fi
    tar -xzvf kubernetes-server-linux-amd64.tar.gz
    cp -r kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /usr/local/bin/
    chmod a+x /usr/local/bin/kube*
    export PATH=/usr/local/bin:$PATH

    # Create /root/.kube/config
    # Set the cluster parameters; --server points at the master node IP
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=https://172.23.210.30:6443
    # Set the client authentication parameters
    kubectl config set-credentials admin \
      --client-certificate=/etc/kubernetes/ssl/admin.pem \
      --embed-certs=true \
      --client-key=/etc/kubernetes/ssl/admin-key.pem
    # Set the context parameters
    kubectl config set-context kubernetes \
      --cluster=kubernetes \
      --user=admin
    # Set the default context
    kubectl config use-context kubernetes

    # Create bootstrap.kubeconfig
    # The kubelet authenticates to kube-apiserver through bootstrap.kubeconfig
    # Generate the bootstrap token
    export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
    cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
    mv token.csv /etc/kubernetes/
    # Set the cluster parameters; --server is the master node IP
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=https://172.23.210.30:6443 \
      --kubeconfig=bootstrap.kubeconfig
    # Set the client authentication parameters
    kubectl config set-credentials kubelet-bootstrap \
      --token=${BOOTSTRAP_TOKEN} \
      --kubeconfig=bootstrap.kubeconfig
    # Set the context parameters
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kubelet-bootstrap \
      --kubeconfig=bootstrap.kubeconfig
    # Set the default context
    kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
    mv bootstrap.kubeconfig /etc/kubernetes/

    # Create kube-proxy.kubeconfig
    # Set the cluster parameters; --server is the master node IP
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=https://172.23.210.30:6443 \
      --kubeconfig=kube-proxy.kubeconfig
    # Set the client authentication parameters
    kubectl config set-credentials kube-proxy \
      --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
      --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-proxy.kubeconfig
    # Set the context parameters
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kube-proxy \
      --kubeconfig=kube-proxy.kubeconfig
    # Set the default context
    kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
    mv kube-proxy.kubeconfig /etc/kubernetes/

    # Deploy the master node
    # Everything above is preparation; the actual kubernetes deployment starts here
    # Download the kubernetes release tarball (kept for reference; the server tarball above is used instead)
    #cd
    #if [ ! -f kubernetes.tar.gz ];then
    #wget https://github.com/kubernetes/kubernetes/releases/download/v1.8.2/kubernetes.tar.gz
    #fi
    #tar -zxvf kubernetes.tar.gz
    #cd kubernetes
    #echo y | /bin/sh /root/kubernetes/cluster/get-kube-binaries.sh
    #cd /root/kubernetes/server/
    #tar -zxvf kubernetes-server-linux-amd64.tar.gz
    #cp -r /root/kubernetes/server/kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /usr/local/bin/

    # Create the kube-apiserver systemd unit file
    cat > /lib/systemd/system/kube-apiserver.service << 'EOF'
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
  --logtostderr=true \
  --v=0 \
  --etcd-servers=https://172.23.210.30:2379 \
  --advertise-address=172.23.210.30 \
  --bind-address=172.23.210.30 \
  --insecure-bind-address=172.23.210.30 \
  --allow-privileged=true \
  --service-cluster-ip-range=10.23.0.0/16 \
  --admission-control=ServiceAccount,NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota \
  --authorization-mode=RBAC \
  --runtime-config=rbac.authorization.k8s.io/v1beta1 \
  --kubelet-https=true \
  --token-auth-file=/etc/kubernetes/token.csv \
  --service-node-port-range=30000-32767 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
  --enable-swagger-ui=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/lib/audit.log \
  --event-ttl=1h
Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

    # The content of /etc/kubernetes/config is:
    cat > /etc/kubernetes/config << 'EOF'
##
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service

# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=true"

# How the controller-manager, scheduler, and proxy find the apiserver
#KUBE_MASTER="--master=http://sz-pg-oam-docker-test-001.tendcloud.com:8080"
KUBE_MASTER="--master=http://172.23.210.30:8080"
EOF

    # The content of /etc/kubernetes/apiserver is:
    cat > /etc/kubernetes/apiserver << 'EOF'
##
## kubernetes system config
##
## The following values are used to configure the kube-apiserver
##
#
## The address on the local server to listen to.
#KUBE_API_ADDRESS="--insecure-bind-address=sz-pg-oam-docker-test-001.tendcloud.com"
KUBE_API_ADDRESS="--advertise-address=172.23.210.30 --bind-address=172.23.210.30 --insecure-bind-address=172.23.210.30"
#
## The port on the local server to listen on.
#KUBE_API_PORT="--port=8080"
#
## Port minions listen on
#KUBELET_PORT="--kubelet-port=10250"
#
## Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=https://172.23.210.30:2379"
#
## Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.23.0.0/16"
#
## default admission control policies
KUBE_ADMISSION_CONTROL="--admission-control=ServiceAccount,NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
#
## Add your own!
KUBE_API_ARGS="--authorization-mode=RBAC --runtime-config=rbac.authorization.k8s.io/v1beta1 --kubelet-https=true --experimental-bootstrap-token-auth --token-auth-file=/etc/kubernetes/token.csv --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem --client-ca-file=/etc/kubernetes/ssl/ca.pem --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem --etcd-cafile=/etc/kubernetes/ssl/ca.pem --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem --enable-swagger-ui=true --apiserver-count=3 --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/lib/audit.log --event-ttl=1h"
EOF

    # Start kube-apiserver
    systemctl daemon-reload
    systemctl enable kube-apiserver
    systemctl start kube-apiserver

    # Create the kube-controller-manager systemd unit file
    cat > /lib/systemd/system/kube-controller-manager.service << 'EOF'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
  --logtostderr=true \
  --v=0 \
  --master=http://172.23.210.30:8080 \
  --address=127.0.0.1 \
  --service-cluster-ip-range=10.23.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --leader-elect=true
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload
    systemctl start kube-controller-manager

    # Create the kube-scheduler systemd unit file
    cat > /lib/systemd/system/kube-scheduler.service << 'EOF'
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
  --logtostderr=true \
  --v=0 \
  --master=http://172.23.210.30:8080 \
  --leader-elect=true \
  --address=127.0.0.1
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload
    systemctl start kube-scheduler
else
    echo "kubernetes binaries are already installed"
fi
} #end f_k8s
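# Optional helper: a minimal sketch for verifying the master components after
# f_k8s has run. It relies on the /root/.kube/config context created above and
# on the local insecure port 8080 that controller-manager and scheduler use.
function f_check_master(){
    for svc in kube-apiserver kube-controller-manager kube-scheduler;do
        echo -n "${svc}: "
        systemctl is-active ${svc}
    done
    kubectl get componentstatuses
    kubectl cluster-info
} #end f_check_master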
function f_node(){
cd
yum install -y docker
systemctl start docker
mkdir -p /etc/kubernetes/ssl
scp 172.23.210.30:/root/ssl/*.pem /etc/kubernetes/ssl/
scp 172.23.210.30:/etc/kubernetes/{bootstrap.kubeconfig,kube-proxy.kubeconfig} /etc/kubernetes/
scp 172.23.210.30:/root/kubernetes/server/bin/kube-proxy /usr/bin/
wget http://172.23.210.21:83/software/linux/docker/kubernetes-client-linux-amd64.tar.gz
tar zxvf kubernetes-client-linux-amd64.tar.gz
cp kubernetes/client/bin/* /usr/bin/

# Working directories required by the kubelet and kube-proxy units below
mkdir -p /var/lib/kubelet /var/lib/kube-proxy

cat > /usr/lib/systemd/system/kubelet.service << 'EOF'
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
# CNI options, disabled for now:
#   --network-plugin=cni --cni-conf-dir=/etc/cni/ --cni-bin-dir=/opt/cni/bin
ExecStart=/usr/bin/kubelet \
  --address=172.23.210.31 \
  --hostname-override=172.23.210.31 \
  --pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest \
  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --container-runtime=docker \
  --cluster-dns=114.114.114.114 \
  --hairpin-mode promiscuous-bridge \
  --allow-privileged=true \
  --runtime-cgroups=/systemd/system.slice \
  --kubelet-cgroups=/systemd/system.slice \
  --serialize-image-pulls=false \
  --register-node=true \
  --logtostderr=true \
  --cgroup-driver=cgroupfs \
  --v=2
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl restart docker
systemctl restart kubelet
echo "On the master node, approve the new node with: kubectl get csr && kubectl certificate approve <node-csr-id>"

cat > /usr/lib/systemd/system/kube-proxy.service << 'EOF'
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/bin/kube-proxy \
  --bind-address=172.23.210.31 \
  --hostname-override=172.23.210.31 \
  --cluster-cidr=10.23.0.0/16 \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
  --logtostderr=true \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl restart kube-proxy
} #end f_node
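# Optional helper, to be run on the master after a node's kubelet has started:
# approves every pending bootstrap CSR in one go instead of copying ids by hand.
# A sketch only - in production you would inspect each request before approving.
function f_approve_nodes(){
    kubectl get csr
    # approve all CSRs that are still pending
    kubectl get csr | awk '/Pending/ {print $1}' | xargs -r kubectl certificate approve
    kubectl get nodes
} #end f_approve_nodes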
HELP_TEXT="
Version:      $VERSION
Last updated: $LAST_MODIFIED
Maintainer:   陳智剛

Usage: $0 [ OPTION ]
  --tls       install cfssl and generate the CA and component certificates
  --etcd      install etcd
  --flanneld  install flanneld
  --k8s       install the kubernetes master components
  --node      install a worker node (flanneld, docker, kubelet, kube-proxy)
" #end help info

# pre check
[ $# -eq 0 ] && echo "$HELP_TEXT" && exit 2
[ $# -ne 1 ] && echo 'Only a single option is supported.' && exit 3

OPTION=$1
case $OPTION in
    --tls)
        f_tls
        ;;
    --etcd)
        f_etcd
        ;;
    --flanneld)
        f_flanneld
        ;;
    --k8s)
        f_k8s
        ;;
    --node)
        f_flanneld && f_node
        ;;
    *)
        echo "$HELP_TEXT"
        exit 2
        ;;
esac
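# Usage sketch (run as root; assumes the script is saved as k8s-install.sh):
#   on the master (172.23.210.30):
#     ./k8s-install.sh --tls       # generate the CA and component certificates
#     ./k8s-install.sh --etcd      # install and start etcd
#     ./k8s-install.sh --flanneld  # configure the flannel overlay network
#     ./k8s-install.sh --k8s       # kubeconfigs, apiserver, controller-manager, scheduler
#   on each node (e.g. 172.23.210.31):
#     ./k8s-install.sh --node      # flanneld + docker + kubelet + kube-proxy
#   then approve the node CSRs on the master:
#     kubectl get csr && kubectl certificate approve <csr-name>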