
Compiling and Installing etcd + Kubernetes 1.7.14

Kubernetes cluster

Environment Preparation

I. Install the Go Environment

1. Download the Go tarball

$ wget https://www.golangtc.com/static/go/1.9.2/go1.9.2.linux-amd64.tar.gz

2. Extract the tarball and create directories

# Create the language directory to hold the Go toolchain -- GOROOT
$ mkdir /opt/language/

# Create the GOPATH directory and its src, bin, and pkg subdirectories
# for Go source code, binaries, and packages -- GOPATH
$ mkdir -p /opt/go/{src,bin,pkg}
$ tar -xvf go1.9.2.linux-amd64.tar.gz -C /opt/language/

3. Configure environment variables

$ sudo vim /etc/profile.d/go.sh
    export GOROOT=/opt/language/go
    export GOBIN=/opt/go/bin
    export GOPATH=/opt/go
    export PATH=$PATH:$GOROOT/bin
# Load the environment variables
$ source /etc/profile.d/go.sh
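
To confirm the toolchain is on the PATH, a quick check (output shown for this version):

$ go version
go version go1.9.2 linux/amd64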

4. Write the /etc/hosts file

10.39.35.19 etcd1 
10.39.35.20 etcd2 
10.39.35.21 etcd3 
10.39.35.19 bc-master-35-19 
10.39.35.20 bc-master-35-20 
10.39.35.21 bc-master-35-21 
10.39.35.22 bc-slave-35-22
10.39.35.23 bc-slave-35-23
10.39.35.25 bc-slave-35-25
10.39.35.26 bc-slave-35-26
10.39.35.27 bc-slave-35-27
10.39.35.28 bc-slave-35-28
10.39.35.29 bc-slave-35-29
10.39.35.30 bc-slave-35-30

5. Configure Ansible

File distribution and batch operations in this article are all done with Ansible.

yum install ansible -y
vim /etc/ansible/hosts
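
A minimal inventory sketch; the group names below (etcd, masters, nodes) are illustrative, not mandated by anything else in this article:

[etcd]
10.39.35.19
10.39.35.20
10.39.35.21

[masters]
10.39.35.19
10.39.35.20
10.39.35.21

[nodes]
10.39.35.22
10.39.35.23
10.39.35.25
10.39.35.26
10.39.35.27
10.39.35.28
10.39.35.29
10.39.35.30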

II. Install the Certificate Generation Tools

1. Download cfssl

mkdir -p /opt/local/cfssl

cd /opt/local/cfssl

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
mv cfssl_linux-amd64 cfssl

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
mv cfssljson_linux-amd64 cfssljson

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 cfssl-certinfo

# Make the binaries executable first, so the copies in /bin inherit the mode
chmod +x *
cp cfssl /bin/cfssl
cp cfssljson /bin/cfssljson
cp cfssl-certinfo /bin/cfssl-certinfo

2. Generate the default configuration files

mkdir /opt/ssl

cd /opt/ssl

cfssl print-defaults config > config.json

cfssl print-defaults csr > csr.json

3. Edit the configuration files

# config.json

{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}

# csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

4. Generate the CA certificate and key

$ cfssl gencert -initca csr.json | cfssljson -bare ca

$ ll
total 20
-rw-r--r-- 1 root root 1001 Feb 27 10:08 ca.csr
-rw------- 1 root root 1675 Feb 27 10:08 ca-key.pem
-rw-r--r-- 1 root root 1363 Feb 27 10:08 ca.pem
-rw-r--r-- 1 root root  397 Feb 26 17:31 config.json
-rw-r--r-- 1 root root  266 Feb 27 10:08 csr.json

5. Distribute the certificates

# Create the certificate directory
mkdir -p /etc/kubernetes/ssl

# Copy all the files into it
cp * /etc/kubernetes/ssl

These files must be copied to every machine in the Kubernetes cluster.
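
Since distribution is handled with Ansible here, a sketch (using the built-in all group):

ansible all -m file -a "path=/etc/kubernetes/ssl state=directory"
ansible all -m copy -a "src=/etc/kubernetes/ssl/ dest=/etc/kubernetes/ssl/"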

III. Generate etcd Certificates and Install etcd

1. Fetch the etcd source

$ mkdir -p /opt/go/src/github.com/coreos
$ cd /opt/go/src/github.com/coreos
$ git clone https://github.com/coreos/etcd.git

2. Build etcd

$ cd etcd
$ ./build
$ cd /opt/go/src/github.com/coreos/etcd/bin
$ cp etcd /bin
$ cp etcdctl /bin 

The compiled binaries are placed in the bin directory under the source tree. After building, copy the etcd and etcdctl binaries to the other etcd nodes as well.

3. Generate the etcd certificates


cd /opt/ssl
vi etcd-csr.json

{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "10.39.35.19",
    "10.39.35.20",
    "10.39.35.21"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

cfssl gencert -ca=/opt/ssl/ca.pem   -ca-key=/opt/ssl/ca-key.pem   -config=/opt/ssl/config.json   -profile=kubernetes etcd-csr.json | cfssljson -bare etcd

cp etcd* /etc/kubernetes/ssl
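
The etcd certificates are also needed on the other two etcd nodes, for example:

scp /etc/kubernetes/ssl/etcd*.pem 10.39.35.20:/etc/kubernetes/ssl/
scp /etc/kubernetes/ssl/etcd*.pem 10.39.35.21:/etc/kubernetes/ssl/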

4. Write the systemd unit file

The unit is first written to /root/etcd.service and then copied into /etc/systemd/system.

On 10.39.35.19:
vi /root/etcd.service

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
User=root
# set GOMAXPROCS to number of processors
ExecStart=/bin/etcd   --name=etcd1   --cert-file=/etc/kubernetes/ssl/etcd.pem   --key-file=/etc/kubernetes/ssl/etcd-key.pem   --peer-cert-file=/etc/kubernetes/ssl/etcd.pem   --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem   --trusted-ca-file=/etc/kubernetes/ssl/ca.pem   --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem   --initial-advertise-peer-urls=https://10.39.35.19:2380   --listen-peer-urls=https://10.39.35.19:2380   --listen-client-urls=https://10.39.35.19:2379,http://127.0.0.1:2379   --advertise-client-urls=https://10.39.35.19:2379   --initial-cluster-token=k8s-etcd-cluster   --initial-cluster=etcd1=https://10.39.35.19:2380,etcd2=https://10.39.35.20:2380,etcd3=https://10.39.35.21:2380   --initial-cluster-state=new   --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

cp /root/etcd.service /etc/systemd/system

5. Distribute the etcd unit file

After copying the file to the other nodes, change --name and the IP addresses to match each node.

scp etcd.service 10.39.35.20:/etc/systemd/system/
scp etcd.service 10.39.35.21:/etc/systemd/system/

6. Start the service once configuration is complete

mkdir /var/lib/etcd
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
systemctl status etcd

7. Check the etcd cluster

7.1 Check cluster health
etcdctl --endpoints=https://10.39.35.19:2379 --cert-file=/etc/kubernetes/ssl/etcd.pem --ca-file=/etc/kubernetes/ssl/ca.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem cluster-health
cluster is healthy
7.2 List members
etcdctl --endpoints=https://10.39.35.19:2379 --cert-file=/etc/kubernetes/ssl/etcd.pem --ca-file=/etc/kubernetes/ssl/ca.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem member list

IV. Install Kubernetes

1. Download the source, build, and distribute


$ git clone https://github.com/kubernetes/kubernetes.git

# Check out the release used in this article
$ cd kubernetes
$ git checkout v1.7.14

$ make

$ ll /opt/app/k8s/source-k8s/kubernetes/_output/local/bin/linux/amd64
total 2.3G
-rwxr-xr-x 1 root root  54M Jan 28 11:10 apiextensions-apiserver
-rwxr-xr-x 1 root root 112M Jan 28 11:10 cloud-controller-manager
-rwxr-xr-x 1 root root 6.5M Jan 28 11:04 conversion-gen
-rwxr-xr-x 1 root root 6.5M Jan 28 11:03 deepcopy-gen
-rwxr-xr-x 1 root root 6.5M Jan 28 11:04 defaulter-gen
-rwxr-xr-x 1 root root 225M Jan 28 11:10 e2e_node.test
-rwxr-xr-x 1 root root 154M Jan 28 11:10 e2e.test
-rwxr-xr-x 1 root root  65M Jan 28 11:10 gendocs
-rwxr-xr-x 1 root root 167M Jan 28 11:10 genkubedocs
-rwxr-xr-x 1 root root 174M Jan 28 11:10 genman
-rwxr-xr-x 1 root root 5.0M Jan 28 11:10 genswaggertypedocs
-rwxr-xr-x 1 root root  65M Jan 28 11:10 genyaml
-rwxr-xr-x 1 root root 9.7M Jan 28 11:10 ginkgo
-rwxr-xr-x 1 root root  38M Jan 28 11:10 gke-certificates-controller
-rwxr-xr-x 1 root root 2.6M Jan 28 11:03 go-bindata
-rwxr-xr-x 1 root root 240M Jan 28 11:09 hyperkube
-rwxr-xr-x 1 root root 144M Jan 28 11:10 kubeadm
-rwxr-xr-x 1 root root  53M Jan 28 11:10 kube-aggregator
-rwxr-xr-x 1 root root 200M Jan 28 11:10 kube-apiserver
-rwxr-xr-x 1 root root 131M Jan 28 11:09 kube-controller-manager
-rwxr-xr-x 1 root root  65M Jan 28 11:10 kubectl
-rwxr-xr-x 1 root root 141M Jan 28 11:10 kubelet
-rwxr-xr-x 1 root root 142M Jan 28 11:10 kubemark
-rwxr-xr-x 1 root root  61M Jan 28 11:09 kube-proxy
-rwxr-xr-x 1 root root  59M Jan 28 11:10 kube-scheduler
-rwxr-xr-x 1 root root 6.0M Jan 28 11:10 linkcheck
-rwxr-xr-x 1 root root 2.1M Jan 28 11:10 mounter
-rwxr-xr-x 1 root root  12M Jan 28 11:04 openapi-gen
-rwxr-xr-x 1 root root 2.6M Jan 28 11:03 teststale

Copy kubectl, kubelet, kube-apiserver, kube-controller-manager, kube-scheduler, and kube-proxy into /bin on every master node; all files must be executable.

Copy kubelet and kube-proxy into /bin on every slave node; all files must be executable.

Copy the whole output directory to the other nodes as needed; a distribution sketch follows.
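
A distribution sketch with scp; the binary lists follow the role split above:

cd /opt/app/k8s/source-k8s/kubernetes/_output/local/bin/linux/amd64
for ip in 10.39.35.20 10.39.35.21; do
    scp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy ${ip}:/bin/
done
for ip in 10.39.35.22 10.39.35.23 10.39.35.25 10.39.35.26 10.39.35.27 10.39.35.28 10.39.35.29 10.39.35.30; do
    scp kubelet kube-proxy ${ip}:/bin/
done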

2. Install and start Docker

2.1 Install Docker

yum -y install yum-utils

yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

yum install -y docker-ce
2.2 Partition the disk
vim disk.sh

    # Create a primary partition spanning the disk
    disk_partition () {
        parted -s /dev/vdc mklabel msdos
        parted -s /dev/vdc mkpart primary 0 100%
    }

    # Create the LVM thin pool for Docker
    disk_lvm () {
        pvcreate /dev/vdc1
        vgcreate docker /dev/vdc1
        lvcreate --wipesignatures y -n thinpool docker -l 95%VG
        lvcreate --wipesignatures y -n thinpoolmeta docker -l 1%VG
        lvconvert -y --zero n -c 512K --thinpool docker/thinpool --poolmetadata docker/thinpoolmeta
        echo -e 'activation {\nthin_pool_autoextend_threshold=90\nthin_pool_autoextend_percent=20\n}' > /etc/lvm/profile/docker-thinpool.profile
        lvchange --metadataprofile docker-thinpool docker/thinpool
    }
    disk_partition
    disk_lvm

bash disk.sh
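
To confirm the thin pool exists before pointing Docker at it (the thinpool LV should show a t in its attribute column):

lvs docker
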
2.3 Edit the Docker daemon configuration
vim /etc/docker/daemon.json
{
  "hosts": ["unix:///var/run/docker.sock"],
  "storage-driver": "devicemapper",
  "storage-opts": [
    "dm.basesize=10G",
    "dm.thinpooldev=/dev/mapper/docker-thinpool",
    "dm.use_deferred_removal=true",
    "dm.use_deferred_deletion=true"
  ],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "20m",
    "max-file": "10"
  },
  "live-restore": false
}
2.4 Start Docker
systemctl enable docker

systemctl start docker
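
Verify the storage driver took effect; output abbreviated:

$ docker info | grep -A 2 'Storage Driver'
Storage Driver: devicemapper
 Pool Name: docker-thinpool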

3. Create the admin certificate

mkdir /opt/ssl
cd /opt/ssl

vim admin-csr.json
{
    "CN": "admin",
    "hosts": [],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "system.masters",
            "OU": "System"
        }
    ]
}

4. Generate the certificate

$ cfssl gencert -ca=/opt/ssl/ca.pem -ca-key=/opt/ssl/ca-key.pem -config=/opt/ssl/config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
$ ll admin*
-rwxr-xr-x 1 root root 1009 Mar  8 14:36 admin.csr
-rwxr-xr-x 1 root root  288 Mar  8 14:32 admin-csr.json
-rwx--x--x 1 root root 1675 Mar  8 14:36 admin-key.pem
-rwxr-xr-x 1 root root 1403 Mar  8 14:36 admin.pem

5. Configure the kubectl kubeconfig file


kubectl config set-cluster kubernetes   --certificate-authority=/etc/kubernetes/ssl/ca.pem   --embed-certs=true   --server=https://10.39.35.19:6443

# Configure client credentials

kubectl config set-credentials admin   --client-certificate=/etc/kubernetes/ssl/admin.pem   --embed-certs=true   --client-key=/etc/kubernetes/ssl/admin-key.pem

kubectl config set-context kubernetes   --cluster=kubernetes   --user=admin

kubectl config use-context kubernetes

cp /root/.kube/config /etc/kubernetes/kubelet.kubeconfig

# After the context is created, the kubeconfig is saved at /root/.kube/config

# Distribute the kubeconfig configured above to the other machines

# Create the directory on the other servers

mkdir /root/.kube

scp /root/.kube/config 10.39.35.20:/root/.kube/

scp /root/.kube/config 10.39.35.21:/root/.kube/

After distributing the file, change the server IP in it to the local machine's IP.

V. Create the Kubernetes Certificates

1. Create the JSON file

vim kubernetes-csr.json
{
    "CN": "kuberenetes",
    "hosts": [
       "127.0.0.1",
       "10.39.35.19",
       "10.39.35.20",
       "10.39.35.21",
       "10.254.0.1",
       "kubernetes",
       "kubernetes.default",
       "kubernetes.default.svc",
       "kubernetes.default.svc.cluster",
       "kubernetes.default.svc.cluster.local"
],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}

In the hosts field, 127.0.0.1 and the three master IPs (10.39.35.19, 10.39.35.20, 10.39.35.21) are listed, and 10.254.0.1 is the Kubernetes service IP, usually the first IP of the service network. Once the cluster is up, you can see it with kubectl get svc.

$ cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem -ca-key=/etc/kubernetes/ssl/ca-key.pem -config=/etc/kubernetes/ssl/config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
$ ll kubernetes*
-rwxr-xr-x 1 root root 1261 Mar  9 10:17 kubernetes.csr
-rwxr-xr-x 1 root root  561 Mar  9 10:16 kubernetes-csr.json
-rwx--x--x 1 root root 1675 Mar  9 10:17 kubernetes-key.pem
-rwxr-xr-x 1 root root 1631 Mar  9 10:17 kubernetes.pem

2. Write the kube-apiserver.service unit file

On 10.39.35.19:
mkdir /root/service/
vi /root/service/kube-apiserver.service

[Unit]
Description=kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
User=root
ExecStart=/bin/kube-apiserver   --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota   --advertise-address=10.39.35.19   --allow-privileged=true   --apiserver-count=3   --audit-log-maxage=30   --audit-log-maxbackup=3   --audit-log-maxsize=100   --audit-log-path=/var/log/audit.log   --authorization-mode=AlwaysAllow   --bind-address=10.39.35.19   --client-ca-file=/etc/kubernetes/ssl/ca.pem   --enable-swagger-ui=true   --etcd-cafile=/etc/kubernetes/ssl/ca.pem   --etcd-certfile=/etc/kubernetes/ssl/etcd.pem   --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem   --etcd-servers=https://10.39.35.19:2379,https://10.39.35.20:2379,https://10.39.35.21:2379   --event-ttl=1h   --kubelet-https=true   --insecure-bind-address=10.39.35.19   --runtime-config=rbac.authorization.k8s.io/v1alpha1   --service-account-key-file=/etc/kubernetes/ssl/ca.pem   --service-cluster-ip-range=10.254.0.0/16   --service-node-port-range=30000-32000   --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem   --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem   --experimental-bootstrap-token-auth   --token-auth-file=/etc/kubernetes/token.csv   --v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

cp /root/service/kube-apiserver.service /etc/systemd/system/

3. Start the service

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver

4. Generate the bootstrap token

Note: the API server is started with --token-auth-file=/etc/kubernetes/token.csv, so restart kube-apiserver after creating this file.

# Generate a token

[root@k8s-master-1 ssl]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
d51cd04061186edab41cf11abba63d5f

# Create the token.csv file

cd /opt/ssl

vi token.csv

d51cd04061186edab41cf11abba63d5f,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

# Copy it to where --token-auth-file points

cp token.csv /etc/kubernetes/
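
The format is token,user,uid,"groups". The same steps as a small script (a sketch; restarting the API server makes it re-read the file):

BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /etc/kubernetes/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
systemctl restart kube-apiserver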

5. Write the kube-controller-manager.service unit file

On 10.39.35.19:
vi /root/service/kube-controller-manager.service

[Unit]

Description=kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/bin/kube-controller-manager   --address=127.0.0.1   --master=http://10.39.35.19:8080   --allocate-node-cidrs=true   --service-cluster-ip-range=10.254.0.0/16   --cluster-cidr=10.233.0.0/16   --cluster-name=kubernetes   --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem   --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem   --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem   --root-ca-file=/etc/kubernetes/ssl/ca.pem   --leader-elect=true   --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

cp /root/service/kube-controller-manager.service /etc/systemd/system/kube-controller-manager.service
Start the service
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager

6. Write the kube-scheduler.service unit file

On 10.39.35.19:
vi /root/service/kube-scheduler.service 

[Unit]
Description=kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/bin/kube-scheduler   --address=127.0.0.1   --master=http://10.39.35.19:8080   --leader-elect=true   --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

cp /root/service/kube-scheduler.service /etc/systemd/system/ 
Start the service
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler
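
With the API server, controller manager, and scheduler all running, a quick sanity check (a sketch; output abbreviated):

$ kubectl get componentstatuses
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health": "true"}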

7. Configure the kubelet

kubectl config set-cluster kubernetes   --certificate-authority=/etc/kubernetes/ssl/ca.pem   --embed-certs=true   --server=https://10.39.35.19:6443   --kubeconfig=bootstrap.kubeconfig

# Configure client credentials (use the token generated in step 4)

kubectl config set-credentials kubelet-bootstrap   --token=d51cd04061186edab41cf11abba63d5f   --kubeconfig=bootstrap.kubeconfig

# Configure the context

kubectl config set-context default   --cluster=kubernetes   --user=kubelet-bootstrap   --kubeconfig=bootstrap.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# Move the generated bootstrap.kubeconfig into place

mv bootstrap.kubeconfig /etc/kubernetes/
Edit kubelet.service

vi /root/service/kubelet.service

[Unit]
Description=kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/bin/kubelet   --address=10.39.35.19   --hostname-override=bc-master-35-19   --pod-infra-container-image=jicki/pause-amd64:3.0   --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig   --kubeconfig=/etc/kubernetes/kubelet.kubeconfig   --require-kubeconfig   --cert-dir=/etc/kubernetes/ssl   --cluster_dns=10.254.0.2   --cluster_domain=cluster.local.   --hairpin-mode promiscuous-bridge   --allow-privileged=true   --serialize-image-pulls=false   --logtostderr=true   --network-plugin=cni   --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

cp /root/service/kubelet.service /etc/systemd/system
Start the kubelet
mkdir /var/lib/kubelet
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet
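
Because the kubelet bootstraps over TLS, it first submits a certificate signing request that must be approved on a master before the node registers (the CSR name below is a placeholder):

# List pending requests
kubectl get csr

# Approve one; substitute the real name from the listing
kubectl certificate approve node-csr-xxxxx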

8. Deploy kube-proxy

Create the certificate
cd /opt/ssl
vi kube-proxy-csr.json

{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
Generate the certificate
cfssl gencert -ca=/opt/ssl/ca.pem   -ca-key=/opt/ssl/ca-key.pem   -config=/opt/ssl/config.json   -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy
cp kube-proxy* /etc/kubernetes/ssl/
Create the kube-proxy kubeconfig file
# Configure the cluster

kubectl config set-cluster kubernetes   --certificate-authority=/etc/kubernetes/ssl/ca.pem   --embed-certs=true   --server=https://10.39.35.19:6443   --kubeconfig=kube-proxy.kubeconfig

# Configure client credentials

kubectl config set-credentials kube-proxy   --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem   --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem   --embed-certs=true   --kubeconfig=kube-proxy.kubeconfig

# Configure the context

kubectl config set-context default   --cluster=kubernetes   --user=kube-proxy   --kubeconfig=kube-proxy.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# Move into place
mv kube-proxy.kubeconfig /etc/kubernetes/
Create the kube-proxy unit file
vi /root/service/kube-proxy.service
[Unit]
Description=kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/bin/kube-proxy   --bind-address=10.39.35.19   --hostname-override=bc-master-35-19   --cluster-cidr=10.254.0.0/16   --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig   --logtostderr=true   --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

cp /root/service/kube-proxy.service /etc/systemd/system
Start the service
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
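
Once kubelet and kube-proxy are running (and the CSR has been approved), the node should register; a sketch of the expected output, names and ages will differ:

$ kubectl get nodes
NAME              STATUS    AGE       VERSION
bc-master-35-19   Ready     1m        v1.7.14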

Add the Other Master Nodes


Copy the service directory, change the IPs in the unit files to each node's own IP, and create the corresponding directories:
scp -r /root/service 10.39.35.20:/root
scp -r /root/service 10.39.35.21:/root

Copy the kubernetes directory and update the IPs in the *.kubeconfig files:
scp -r /etc/kubernetes 10.39.35.20:/etc/
scp -r /etc/kubernetes 10.39.35.21:/etc/

Add the Worker Nodes (only one node's configuration is shown here)

Edit the hosts file

vi /etc/hosts

10.39.35.19 kubernetes.default.svc.cluster.local
10.39.35.20 kubernetes.default.svc.cluster.local
10.39.35.21 kubernetes.default.svc.cluster.local
Then edit the configuration files

# Copy the kubelet.service and kube-proxy.service unit files
scp /root/service/kubelet.service /root/service/kube-proxy.service 10.39.35.22:/root

# Change the IP and hostname in them to match this node
cp /root/kubelet.service /etc/systemd/system
cp /root/kube-proxy.service /etc/systemd/system

# Create the working directories
mkdir /var/lib/kubelet
mkdir /var/lib/kube-proxy

# Copy the kubernetes directory to the node
scp -r /etc/kubernetes 10.39.35.22:/etc/

# Replace the IPs in the *.kubeconfig files with kubernetes.default.svc.cluster.local

Deploy kube-dns

Required images:
jicki/k8s-dns-sidecar-amd64:1.14.4
jicki/k8s-dns-kube-dns-amd64:1.14.4
jicki/k8s-dns-dnsmasq-nanny-amd64:1.14.4
mkdir /root/kube-dns

# Copy the kube-dns manifests from the source tree into the kube-dns directory

cd /opt/kubernetes/cluster/addons/dns

cp kubedns-sa.yaml kubedns-cm.yaml /root/kube-dns

cp kubedns-controller.yaml.base /root/kube-dns/kubedns-controller.yaml

cp kubedns-svc.yaml.base  /root/kube-dns/kubedns-svc.yaml
Edit kubedns-controller.yaml
cd /root/kube-dns

vi kubedns-controller.yaml 

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.

# __MACHINE_GENERATED_WARNING__

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: reg.enncloud.cn/enncloud/k8s-dns-kube-dns-amd64:1.14.4  # replace the image here
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local          # __PILLAR__DNS__DOMAIN__ changed to cluster.local
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: reg.enncloud.cn/enncloud/k8s-dns-dnsmasq-nanny-amd64:1.14.4  # replace the image here
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --log-facility=-
        - --server=/cluster.local/127.0.0.1#10053    # __PILLAR__DNS__DOMAIN__ changed to cluster.local
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: reg.enncloud.cn/enncloud/k8s-dns-sidecar-amd64:1.14.4   # replace the image here
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A   # __PILLAR__DNS__DOMAIN__ changed to cluster.local
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A      # __PILLAR__DNS__DOMAIN__ changed to cluster.local
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.

Edit kubedns-svc.yaml


vi kubedns-svc.yaml 

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# __MACHINE_GENERATED_WARNING__

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.0.2   # __PILLAR__DNS__SERVER__ changed to 10.254.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
Create the kube-dns service
kubectl create -f .
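
To verify DNS, check the pod and resolve the API service from a throwaway busybox pod (a sketch; the pod name is arbitrary):

kubectl get pods -n kube-system -l k8s-app=kube-dns

kubectl run -it --rm --restart=Never dns-test --image=busybox -- nslookup kubernetes.default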

Deploy the Calico Components

Required images:
jicki/node:v1.3.0
jicki/cni:v1.9.1
jicki/kube-policy-controller:v0.6.0
Download the manifest and edit it
wget http://docs.projectcalico.org/v2.3/getting-started/kubernetes/installation/hosted/calico.yaml

vi calico.yaml

# Note: modify the following options:

  etcd_endpoints: "https://10.39.35.19:2379,https://10.39.35.20:2379,https://10.39.35.21:2379"

    etcd_ca: "/calico-secrets/etcd-ca"  
    etcd_cert: "/calico-secrets/etcd-cert"
    etcd_key: "/calico-secrets/etcd-key"  

# The data section holds base64-encoded values.
# Run the command in each set of parentheses and paste its output into etcd-key, etcd-cert, and etcd-ca (without the parentheses).

data:
  etcd-key: (cat /etc/kubernetes/ssl/etcd-key.pem | base64 | tr -d '\n')
  etcd-cert: (cat /etc/kubernetes/ssl/etcd.pem | base64 | tr -d '\n')
  etcd-ca: (cat /etc/kubernetes/ssl/ca.pem | base64 | tr -d '\n')

    - name: CALICO_IPV4POOL_CIDR
      value: "10.233.0.0/16"
Create Calico
kubectl create -f calico.yaml
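
Check that the calico-node pods come up on every node (a sketch):

kubectl get pods -n kube-system -o wide | grep calico
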
Test the Cluster
# Create an nginx deployment

vi nginx.yaml

apiVersion: extensions/v1beta1 
kind: Deployment 
metadata: 
  name: nginx-dm
spec: 
  replicas: 2
  template: 
    metadata: 
      labels: 
        name: nginx 
    spec: 
      containers: 
        - name: nginx 
          image: nginx:alpine 
          imagePullPolicy: IfNotPresent
          ports: 
            - containerPort: 80

---

apiVersion: v1 
kind: Service
metadata: 
  name: nginx-svc 
spec: 
  ports: 
    - port: 80
      targetPort: 80
      protocol: TCP 
  selector: 
    name: nginx

kubectl create -f nginx.yaml
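
If everything is wired up, the pods go Running and the service gets a cluster IP; a verification sketch (the IP is a placeholder):

kubectl get pods -o wide
kubectl get svc nginx-svc

# curl the service's cluster IP from any node
curl http://<nginx-svc-cluster-ip>/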
