
kubernetes yaml file annotations

A quick record of how Kubernetes YAML files are written, with the fields annotated inline.
#test-pod
apiVersion: v1 #API version; must be one of the values listed by kubectl api-versions
kind: Pod #role/type of resource being created
metadata: #metadata/attributes of the resource
  name: test-pod #name of the resource; must be unique within its namespace
  labels: #labels attached to the resource
    k8s-app: apache  
    version: v1  
    kubernetes.io/cluster-service: "true"  
  annotations:           #custom annotations: a map of key: value string pairs (not a list)
    annotation-key: "annotation-value"
spec: #specification of the resource content
  restartPolicy: Always #keep the container running; the k8s default - when the container exits, an identical one is created immediately
  nodeSelector:     #node selection; label the node first: kubectl label nodes kube-node1 zone=node1
    zone: node1  
  containers:  
  - name: test-pod #name of the container
    image: 10.192.21.18:5000/test/chat:latest #image the container runs
    imagePullPolicy: Never #one of Always, Never, IfNotPresent; controls whether the image is pulled from the registry when the container starts:
                           # Always - always pull the image
                           # Never - never pull (use only a local image, whether or not one exists)
                           # IfNotPresent - pull only if the image is not already present locally
    command: ['sh'] #command run at container start; overrides the image's entrypoint (Dockerfile ENTRYPOINT)
    args: ["$(str)"] #arguments to the command; corresponds to the Dockerfile CMD
    env: #environment variables for the container
    - name: str #variable name
      value: "/etc/run.sh" #variable value
    resources: #resource management
      requests: #minimum resources the container needs in order to run properly
        cpu: 0.1 #CPU in cores; either a decimal or an integer with an "m" suffix - 0.1 = 100m; the minimum is 0.001 core (1m)
        memory: 32Mi #memory request
      limits: #resource limits
        cpu: 0.5  
        memory: 1000Mi  
    ports:  
    - containerPort: 80 #port the container exposes
      name: httpd  #port name
      protocol: TCP  
    livenessProbe: #health check for the container in this pod
      httpGet: #check health with an HTTP GET; a response code between 200 and 399 means the container is healthy
        path: / #URI to request
        port: 80  
        #host: 127.0.0.1 #host address
        scheme: HTTP  
      initialDelaySeconds: 180 #how long after the container starts before the first probe runs
      timeoutSeconds: 5 #probe timeout
      periodSeconds: 15  #interval between probes
      #an alternative probe type:
      #exec: run a command inside the container; an exit code of 0 means healthy, any other exit code means unhealthy
      #  command:  
      #    - cat  
      #    - /tmp/health  
      #another alternative probe type:
      #tcpSocket: check health by opening a TCP connection
      #  port: number   
    lifecycle: #lifecycle hooks
      postStart: #hook executed immediately after the container starts
        exec:  
          command:  
            - 'sh'
            - '-c'
            - 'yum upgrade -y'
      preStop: #hook executed just before the container is stopped
        exec:
          command: ['sh', '-c', 'service httpd stop']
    volumeMounts:  #mount volumes into the container
    - name: volume #name of the volume to mount; must match an entry in volumes[*].name
      mountPath: /data #path inside the container where the volume is mounted
      readOnly: True  
  volumes: #define the volumes used by the pod
  - name: volume #name of this volume
    #emptyDir: {}
    hostPath:
      path: /opt #this volume is of type hostPath, mapping /opt on the host node; many other volume types are supported, e.g.
    #nfs
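
To create the pod from this file, label the target node first (as referenced in the nodeSelector comment above) and then apply the manifest. The file name test-pod.yaml below is only an assumed example:

kubectl label nodes kube-node1 zone=node1   # add the label matched by the nodeSelector
kubectl create -f test-pod.yaml             # create the pod from this manifest
kubectl get pod test-pod -o wide            # check that it was scheduled onto the labelled node
kubectl describe pod test-pod               # inspect events, probe results and volume mounts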

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.

# Warning: This is a file generated from the base underscore template file: kube-dns.yaml.base

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.16.115
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
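#data:                       # illustrative only - kube-dns reads optional keys such as stubDomains and
#                            # upstreamNameservers from this ConfigMap; the values below are example placeholders
#  stubDomains: |
#    {"example.local": ["10.0.0.10"]}
#  upstreamNameservers: |
#    ["8.8.8.8", "8.8.4.4"]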
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. So that the Addon Manager does not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: registryip:port/dns/kube-dns-mips64le
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: registryip:port/dns/dnsmasq-nanny-mips64le
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --log-facility=-
        - --server=/cluster.local/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: registryip:port/dns/sidecar-mips64le
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns
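
Once the manifests above are applied, the DNS add-on can be checked with a few kubectl commands; the busybox test image in the last command is only an illustrative choice:

kubectl -n kube-system get pods -l k8s-app=kube-dns   # the kube-dns pod (kubedns, dnsmasq and sidecar containers) should be Running
kubectl -n kube-system get svc kube-dns               # should show the clusterIP 10.254.16.115 defined above
kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup kubernetes.default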


##pv-0
apiVersion: v1
kind: PersistentVolume
metadata:
  name: test1-nfs
  labels:
    type: nfs
    app: test1
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /data/docker
    server: nfsserverip
---
#pvc-0
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test1-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
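  # note: this claim specifies no storageClassName or selector, so it will normally bind to any
  # available PV whose capacity and access modes satisfy it - here the 1Gi ReadWriteOnce test1-nfs PV above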
---
#headless service
apiVersion: v1
kind: Service
metadata:
  name: test1-svc
  labels:
    app: test1
spec:
  ports:
  - port: 80 
    targetPort: 80  
  clusterIP: None
  selector:
    app: test1
---
#service
apiVersion: v1
kind: Service
metadata:
  name: test1-service
  labels:
    app: test1
spec:
   type: NodePort
   clusterIP: clusterip
   ports:
   - port: 80
     nodePort: 30080
     targetPort: 80
   selector:
     app: test1
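   # with type NodePort this service is reachable from outside the cluster at <any-node-ip>:30080,
   # and inside the cluster at test1-service:80, both forwarding to targetPort 80 on the selected pods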
---
#StatefulSet
apiVersion: apps/v1beta1
kind: StatefulSet 
metadata:   
  name: test1
spec: 
  serviceName: "test1-svc" # must name the governing headless service defined above
  replicas: 1
  template: 
    metadata:
      labels: 
        app: test1
    spec: 
      nodeSelector:
        zone: node1
      containers:  
      - name: test1
        image: registryip:port/test2017:v0.0.2
        ports:
        - containerPort: 2003
          name: test1 
        volumeMounts:
        - name: test-storage
          mountPath: /data/docker
        securityContext:
          privileged: true
      volumes:
        - name: test-storage
          persistentVolumeClaim:
            claimName: test1-pvc
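
After creating these objects (e.g. with kubectl create -f test1.yaml, the file name being an assumed example), the pieces can be verified together:

kubectl get pv,pvc                        # test1-pvc should show STATUS Bound against the test1-nfs PV
kubectl get svc test1-svc test1-service   # the headless and NodePort services defined above
kubectl get statefulset test1             # shows 1/1 once the single replica is ready
kubectl get pod test1-0                   # StatefulSet pods are named <statefulset-name>-<ordinal>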