
k8s-3: Container Cloud Monitoring System

Apollo wrap-up

 

Course outline

1. Overview of Prometheus container cloud monitoring

https://prometheus.io/docs/introduction/overview/ # official documentation

https://github.com/prometheus/prometheus # source code on GitHub

 

 

 

Principle

An exporter acts as the middleware through which Prometheus talks to the monitored target.

The Pushgateway path is faster than pulling: jobs actively push their data to the Pushgateway, and Prometheus scrapes it from there.

Service discovery (the hard part); targets can also be declared through static configuration.

Grafana is dedicated to visualizing the data. A minimal configuration sketch of the two ingestion paths follows.
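A minimal scrape-configuration sketch of the two ingestion paths described above: a statically configured exporter target and a Pushgateway job. The job names, ports and addresses are placeholders, not values from this cluster.

scrape_configs:
- job_name: 'static-exporter-demo'      # pull model: Prometheus scrapes the exporter directly
  static_configs:
  - targets: ['10.4.7.21:9100']         # placeholder exporter address
- job_name: 'pushgateway-demo'          # push model: jobs push to the Pushgateway, Prometheus scrapes the gateway
  honor_labels: true                    # keep the job/instance labels set by the pushing job
  static_configs:
  - targets: ['pushgateway.infra:9091'] # placeholder Pushgateway address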

 

 

 

Architecture overview

Prometheus vs. Zabbix

 

2. Essential exporters for monitoring

Deploying kube-state-metrics

Collects basic Kubernetes state: how many Deployments and Services exist, which Pods are running, and so on; through these basic metrics we get access to the cluster's metadata. A few example queries follow.
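For reference, a few queries over metrics that kube-state-metrics exposes; the metric names come from the upstream project and the exact set depends on the version:

# available replicas per Deployment
kube_deployment_status_replicas_available

# Pods that are not in the Running phase
kube_pod_status_phase{phase!="Running"} == 1

# number of Services per namespace
count(kube_service_info) by (namespace)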

Pull and push the image

On the ops host HDSS7-200.host.com:

[root@hdss7-200 ~]# docker pull quay.io/coreos/kube-state-metrics:v1.5.0

v1.5.0: Pulling from coreos/kube-state-metrics

cd784148e348: Pull complete

f622528a393e: Pull complete

Digest: sha256:b7a3143bd1eb7130759c9259073b9f239d0eeda09f5210f1cd31f1a530599ea1

Status: Downloaded newer image for quay.io/coreos/kube-state-metrics:v1.5.0

[root@hdss7-200 ~]# docker tag 91599517197a harbor.od.com/public/kube-state-metrics:v1.5.0

[root@hdss7-200 ~]# docker push harbor.od.com/public/kube-state-metrics:v1.5.0

 

Resource manifest: rbac

[root@hdss7-200 ~]# mkdir /data/k8s-yaml/kubu-state-metrics

[root@hdss7-200 kubu-state-metrics]# cat rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: kube-state-metrics
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: kube-state-metrics
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - secrets
  - nodes
  - pods
  - services
  - resourcequotas
  - replicationcontrollers
  - limitranges
  - persistentvolumeclaims
  - persistentvolumes
  - namespaces
  - endpoints
  verbs:
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - deployments
  - replicasets
  verbs:
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - statefulsets
  verbs:
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - jobs
  verbs:
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: kube-state-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-state-metrics
subjects:
- kind: ServiceAccount
  name: kube-state-metrics
  namespace: kube-system

 

 

Resource manifest: deployment

[root@hdss7-200 kubu-state-metrics]# cat dp.yaml

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "2"
  labels:
    grafanak8sapp: "true"
    app: kube-state-metrics
  name: kube-state-metrics
  namespace: kube-system
spec:
  selector:
    matchLabels:
      grafanak8sapp: "true"
      app: kube-state-metrics
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        grafanak8sapp: "true"
        app: kube-state-metrics
    spec:
      containers:
      - image: harbor.od.com/public/kube-state-metrics:v1.5.0
        name: kube-state-metrics
        ports:
        - containerPort: 8080
          name: http-metrics
          protocol: TCP
        readinessProbe:  # readiness probe: pods that are not ready receive no traffic; the kubelet keeps checking the pod so a broken pod does not leave the page returning 404
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 5
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        imagePullPolicy: IfNotPresent
      imagePullSecrets:
      - name: harbor
      restartPolicy: Always
      serviceAccount: kube-state-metrics
      serviceAccountName: kube-state-metrics

 

Apply the resource manifests

kubectl apply -f http://k8s-yaml.od.com/kubu-state-metrics/rbac.yaml

kubectl apply -f http://k8s-yaml.od.com/kubu-state-metrics/dp.yaml

Verify that it started correctly

 

 

 

 

 

 

Deploying node-exporter

# Purpose: monitor host-level resources on the compute nodes, such as CPU and memory.
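As an illustration (not part of the original notes), two typical node-exporter queries; node_exporter v0.15 still uses the old metric names such as node_cpu and node_memory_MemTotal, which the alert rules later in this document rely on as well:

# per-node CPU usage ratio over the last 5 minutes
1 - avg(irate(node_cpu{mode="idle"}[5m])) by (instance)

# per-node memory usage ratio
(node_memory_MemTotal - node_memory_MemAvailable) / node_memory_MemTotal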

Pull and push the image

On the ops host HDSS7-200.host.com:

[root@hdss7-200 kubu-state-metrics]# docker pull prom/node-exporter:v0.15.0

docker tag b3e7f67a1480 harbor.od.com/public/node-exporter:v0.15.0

[root@hdss7-200 kubu-state-metrics]# docker push harbor.od.com/public/node-exporter:v0.15.0

[root@hdss7-200 kubu-state-metrics]# mkdir /data/k8s-yaml/node-exporter/

 

Resource manifest: DaemonSet

A DaemonSet runs one Pod on every compute node.

[root@hdss7-200 node-exporter]# vi ds.yaml

kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: node-exporter
  namespace: kube-system
  labels:
    daemon: "node-exporter"
    grafanak8sapp: "true"
spec:
  selector:
    matchLabels:
      daemon: "node-exporter"
      grafanak8sapp: "true"
  template:
    metadata:
      name: node-exporter
      labels:
        daemon: "node-exporter"
        grafanak8sapp: "true"
    spec:
      volumes:
      - name: proc
        hostPath:
          path: /proc
          type: ""
      - name: sys
        hostPath:
          path: /sys
          type: ""
      containers:
      - name: node-exporter
        image: harbor.od.com/public/node-exporter:v0.15.0
        args:
        - --path.procfs=/host_proc
        - --path.sysfs=/host_sys
        ports:
        - name: node-exporter
          hostPort: 9100  # port exposed on the host
          containerPort: 9100
          protocol: TCP
        volumeMounts:
        - name: sys
          readOnly: true
          mountPath: /host_sys
        - name: proc
          readOnly: true
          mountPath: /host_proc
      imagePullSecrets:
      - name: harbor
      restartPolicy: Always
      hostNetwork: true

[root@hdss7-21 ~]# curl 10.4.7.21:9100/metrics # fetch the host's resource-usage metrics

Confirm the installation succeeded

 

 

Apply the resource manifest

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/node-exporter/ds.yaml

 

 

 

Deploying cAdvisor

Monitors the resources used by Pods: CPU, memory, and so on.

Watch the version: some releases have bugs with Grafana; v0.28 is the recommended one.
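By way of example (not from the original notes), typical cAdvisor container metrics of the kind the dashboards and alert rules below build on:

# CPU usage per Pod (cAdvisor's container_label_* label style, as used later in this document)
sum(irate(container_cpu_usage_seconds_total[1m])) by (container_label_io_kubernetes_pod_name)

# memory usage per Pod
sum(container_memory_usage_bytes) by (container_label_io_kubernetes_pod_name)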

On the ops host HDSS7-200.host.com:

Pull and push the image

[root@hdss7-200 node-exporter]# docker pull google/cadvisor:v0.28.3

[root@hdss7-200 node-exporter]# docker tag 75f88e3ec333 harbor.od.com/public/cadvisor:v0.28.3

[root@hdss7-200 ~]# docker push !$

 

Resource manifest: DaemonSet

[root@hdss7-200 node-exporter]# mkdir /data/k8s-yaml/cadvisor

vi ds.yaml

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cadvisor
  namespace: kube-system
  labels:
    app: cadvisor
spec:
  selector:
    matchLabels:
      name: cadvisor
  template:
    metadata:
      labels:
        name: cadvisor
    spec:
      hostNetwork: true
      tolerations:  # tolerate the master taint so the Pod may also be scheduled onto tainted (master) nodes
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: cadvisor
        image: harbor.od.com/public/cadvisor:v0.28.3
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: rootfs
          mountPath: /rootfs
          readOnly: true
        - name: var-run
          mountPath: /var/run
        - name: sys
          mountPath: /sys
          readOnly: true
        - name: docker
          mountPath: /var/lib/docker
          readOnly: true
        ports:
        - name: http
          containerPort: 4194
          protocol: TCP
        readinessProbe:
          tcpSocket:
            port: 4194
          initialDelaySeconds: 5
          periodSeconds: 10
        args:
        - --housekeeping_interval=10s
        - --port=4194
      terminationGracePeriodSeconds: 30
      volumes:
      - name: rootfs
        hostPath:
          path: /
      - name: var-run
        hostPath:
          path: /var/run
      - name: sys
        hostPath:
          path: /sys
      - name: docker
        hostPath:
          path: /data/docker

 

Adjust the cgroup symlink on the compute nodes

On all compute nodes:

mount -o remount,rw /sys/fs/cgroup/ # remount re-mounts in place; no device argument is needed

ln -s /sys/fs/cgroup/cpu,cpuacct /sys/fs/cgroup/cpuacct,cpu # create a symlink so the directory name matches the one the container expects

ll /sys/fs/cgroup/ | grep cpu

 

 

Apply the resource manifest

On any compute node:

kubectl apply -f http://k8s-yaml.od.com/cadvisor/ds.yaml

netstat -luntp|grep 4194 # check that the port exposed by the DaemonSet is listening

 

 

 

 

Tainted nodes

Add role labels

Labels can be used to filter nodes.

[root@hdss7-21 cert]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/master=

[root@hdss7-21 cert]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/node=

 

[root@hdss7-22 ~]# kubectl get nodes

NAME STATUS ROLES AGE VERSION

hdss7-21.host.com Ready master,node 15h v1.15.2

hdss7-22.host.com Ready master,node 12m v1.15.2

 

 

 

 

Three ways to influence Kubernetes scheduling:

1. Taints and tolerations: a toleration declares whether a Pod can tolerate a node's taint.

2. nodeName: run the Pod on one specific node.

3. nodeSelector: use a label selector to run the Pod on a class of labeled nodes.

A minimal sketch of nodeName and nodeSelector follows.
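A minimal, hypothetical Pod spec showing nodeName and nodeSelector; the node name, label and image below are placeholders, not taken from the course manifests:

apiVersion: v1
kind: Pod
metadata:
  name: scheduling-demo
spec:
  nodeName: hdss7-21.host.com   # bypass the scheduler and bind the Pod directly to this node
  # alternatively, let the scheduler pick any node carrying a matching label:
  # nodeSelector:
  #   disktype: ssd
  containers:
  - name: demo
    image: harbor.od.com/public/nginx:v1.7.9   # placeholder image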

 

 

 

 

 

 

 

 

 

Taint a compute node

This is aimed at the scheduler: instead of letting the default scheduler place Pods on this node freely, scheduling onto it is governed by the rules (tolerations) declared in the resource manifests.

# Taint node 21 so that the default scheduling rules skip it

# The taint's key is node-role.kubernetes.io/master, its value is master, and the effect is NoSchedule

[root@hdss7-21 ~]# kubectl taint node hdss7-21.host.com node-role.kubernetes.io/master=master:NoSchedule

node/hdss7-21.host.com tainted

 

 

 

 

 

Taint usage summary

A taint is a key/value pair; the value may be empty, in which case matching is done on the key alone.

Taint: applied to a node (here, a compute node).

Toleration: declared in a Pod's resource manifest; when it matches the taint on a node, the Pod is allowed to run on that tainted node.

 

 

刪除汙點

[root@hdss7-21 ~]# kubectl taint node hdss7-21.host.com node-role.kubernetes.io/master- #刪除汙點,注意master-

刪除汙點使用key-就可以刪除

More on taint tolerations

# Test: create 2 Pods across 2 nodes, scale down to 1, then taint node 21 (the node without the Pod) and observe.

# Add a taint on node 21

# The taint's key is quedian and its value is buxijiao; the effect for Pods that do not tolerate it is NoSchedule

kubectl taint node hdss7-21.host.com quedian=buxijiao:NoSchedule

kubectl describe node hdss7-21.host.com

 

Toleration configuration in the resource manifest

# written above the containers field, under the Pod template's spec

Once the taint is tolerated, the Pod can run on the tainted node; see the sketch below.
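A minimal toleration sketch matching the example taint added above (quedian=buxijiao:NoSchedule); it is illustrative rather than a manifest from the course:

spec:
  tolerations:                # under the Pod template's spec, above containers
  - key: "quedian"
    operator: "Equal"
    value: "buxijiao"
    effect: "NoSchedule"
  containers:
  - name: demo
    image: harbor.od.com/public/nginx:v1.7.9   # placeholder image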

 

 

Two taint effects:

kubectl taint node hdss7-21.host.com quedian=buxijiao:NoSchedule # Pods without a matching toleration are not scheduled onto this node

kubectl taint node hdss7-21.host.com quedian=buxijiao:NoExecute # on top of that, Pods already running here that do not tolerate the taint are evicted

# When a node carries a NoExecute taint, the scheduler generally avoids it anyway as long as other healthy nodes are available.

To remove a taint, append a dash to the key; the value can be omitted, since taints are distinguished mainly by their key.

# Because node 21 now carries two taints (buxijiao "doesn't wash feet" and buxizao "doesn't bathe"), a Pod must tolerate both of them to be scheduled there; tolerating only one is not enough.

 

Typical use cases

1. Separate IO-intensive workloads onto nodes with a particular disk type.

2. Run memory-hungry components such as Prometheus on a dedicated node.

3. When a compute node has to go offline for maintenance, taint it to evict its Pods:

kubectl taint node hdss7-21.host.com key=broken:NoExecute

 

 

 

 

 

 

 

 

Deploying blackbox-exporter

One of the most commonly used monitoring components: it checks the liveness of business containers; Prometheus calls blackbox-exporter with parameters describing the target.

It probes service liveness: if the service exposes an HTTP interface, use the http module, otherwise fall back to tcp; only these two kinds of probes are used here.

For a service to be probed, its Pod manifest must carry annotations (or labels) that Prometheus's relabeling rules can match; an example probe request is sketched below.
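For illustration, this is roughly how Prometheus (or you, by hand) would hit the exporter's /probe endpoint; the service address is the in-cluster name used later in this document and the targets are placeholders:

# HTTP probe through the http_2xx module
curl "http://blackbox-exporter.kube-system:9115/probe?module=http_2xx&target=prometheus.od.com"

# TCP probe through the tcp_connect module
curl "http://blackbox-exporter.kube-system:9115/probe?module=tcp_connect&target=10.4.7.21:20880"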

# TCP probe target

# HTTP probe target

Scaling a workload down does not trigger an alert, because the annotation lives in the Pod manifest: when the Pod is removed during scale-down, the annotation disappears with it.

Pull and push the image

On the ops host HDSS7-200.host.com:

[root@hdss7-200 blackbox-exporter]# docker pull prom/blackbox-exporter:v0.15.1

[root@hdss7-200 blackbox-exporter]# docker tag 81b70b6158be harbor.od.com/public/blackbox-exporter:v0.15.1

[root@hdss7-200 ~]# docker push harbor.od.com/public/blackbox-exporter:v0.15.1

 

Prepare the resource manifests

•    ConfigMap

•    Deployment

•    Service

•    Ingress

mkdir /data/k8s-yaml/blackbox-exporter/

vi /data/k8s-yaml/blackbox-exporter/configmap.yaml

apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: blackbox-exporter
  name: blackbox-exporter
  namespace: kube-system
data:
  blackbox.yml: |-
    modules:
      http_2xx:
        prober: http
        timeout: 2s
        http:
          valid_http_versions: ["HTTP/1.1", "HTTP/2"]
          valid_status_codes: [200,301,302]
          method: GET
          preferred_ip_protocol: "ip4"
      tcp_connect:
        prober: tcp
        timeout: 2s

 

 

 

 

vi /data/k8s-yaml/blackbox-exporter/deployment.yaml

kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: blackbox-exporter
  namespace: kube-system
  labels:
    app: blackbox-exporter
  annotations:
    deployment.kubernetes.io/revision: 1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: blackbox-exporter
  template:
    metadata:
      labels:
        app: blackbox-exporter
    spec:
      volumes:
      - name: config
        configMap:
          name: blackbox-exporter
          defaultMode: 420
      containers:
      - name: blackbox-exporter
        image: harbor.od.com/public/blackbox-exporter:v0.15.1
        args:
        - --config.file=/etc/blackbox_exporter/blackbox.yml
        - --log.level=debug
        - --web.listen-address=:9115
        ports:
        - name: blackbox-port
          containerPort: 9115
          protocol: TCP
        resources:
          limits:
            cpu: 200m
            memory: 256Mi
          requests:
            cpu: 100m
            memory: 50Mi
        volumeMounts:
        - name: config
          mountPath: /etc/blackbox_exporter
        readinessProbe:  # readiness probe: pods that are not alive receive no traffic
          tcpSocket:
            port: 9115
          initialDelaySeconds: 5
          timeoutSeconds: 5
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 3
        imagePullPolicy: IfNotPresent
      imagePullSecrets:
      - name: harbor
      restartPolicy: Always

 

vi /data/k8s-yaml/blackbox-exporter/service.yaml

kind: Service
apiVersion: v1
metadata:
  name: blackbox-exporter
  namespace: kube-system
spec:
  selector:
    app: blackbox-exporter
  ports:
  - protocol: TCP
    port: 9115
    name: http

 

 

vi /data/k8s-yaml/blackbox-exporter/ingress.yaml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: blackbox-exporter
  namespace: kube-system
spec:
  rules:
  - host: blackbox.od.com
    http:
      paths:
      - backend:
          serviceName: blackbox-exporter
          servicePort: 9115

 

 

 

Add the DNS record

On HDSS7-11.host.com

Edit /var/named/od.com.zone and add:

blackbox    A 10.4.7.10

[root@hdss7-11 ~]# systemctl restart named

 

[root@hdss7-21 ~]# dig -t A blackbox.od.com @192.168.0.2 +short

10.4.7.10

 

 

Apply the resource manifests

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/configmap.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/deployment.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/service.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/ingress.yaml

 

 

 

Open in a browser

http://blackbox.od.com

 

 

 

3. Deploying Prometheus and its configuration in detail

 

Deploy Prometheus

# docker pull prom/prometheus:v2.14.0

# docker tag 7317640d555e harbor.od.com/infra/prometheus:v2.14.0

# docker push harbor.od.com/infra/prometheus:v2.14.0

 

[root@hdss7-200 ~]# mkdir /data/k8s-yaml/prometheus

[root@hdss7-200 ~]# cd /data/k8s-yaml/prometheus

 

 

Resource manifest: rbac

[root@hdss7-200 prometheus]# cat rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: prometheus
  namespace: infra
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: prometheus
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  - nodes/metrics
  - services
  - endpoints
  - pods
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
- nonResourceURLs:
  - /metrics
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: infra

 

 

Resource manifest: deployment

Add --web.enable-lifecycle to enable remote hot-reloading of the configuration file.

The reload is triggered with: curl -X POST http://localhost:9090/-/reload

--storage.tsdb.min-block-duration=10m # keep only 10 minutes of data in memory

--storage.tsdb.retention=72h # retain 72 hours of data

 

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "5"
  labels:
    name: prometheus
  name: prometheus
  namespace: infra
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 7
  selector:
    matchLabels:
      app: prometheus
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: prometheus
    spec:
      nodeName: hdss7-21.host.com  # pin the pod to this node, away from other memory-hungry workloads such as jenkins
      containers:
      - name: prometheus
        image: harbor.od.com/infra/prometheus:v2.14.0
        imagePullPolicy: IfNotPresent
        command:
        - /bin/prometheus  # command that starts prometheus
        args:
        - --config.file=/data/etc/prometheus.yml  # configuration file
        - --storage.tsdb.path=/data/prom-db  # data directory inside the container
        - --storage.tsdb.min-block-duration=10m  # keep only 10 minutes of data in memory (fine for a test VM)
        - --storage.tsdb.retention=72h  # how long data is retained (test environment)
        - --web.enable-lifecycle  # enable hot-reloading the configuration over HTTP
        ports:
        - containerPort: 9090
          protocol: TCP
        volumeMounts:
        - mountPath: /data  # mount point inside the container
          name: data
        resources:  # one way of limiting a container's resources
          requests:  # what the pod asks for
            cpu: "1000m"  # 1000m = 1000 millicores = 1 core
            memory: "1.5Gi"
          limits:  # hard ceiling
            cpu: "2000m"
            memory: "3Gi"
      imagePullSecrets:
      - name: harbor
      securityContext:
        runAsUser: 0
      serviceAccountName: prometheus
      volumes:
      - name: data
        nfs:
          server: hdss7-200
          path: /data/nfs-volume/prometheus  # host directory that backs the container's /data mount

 

Resource manifest: service

vi svc.yaml

apiVersion: v1
kind: Service
metadata:
  name: prometheus
  namespace: infra
spec:
  ports:
  - port: 9090
    protocol: TCP
    targetPort: 9090
  selector:
    app: prometheus

 

Resource manifest: ingress

vi ingress.yaml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: traefik
  name: prometheus
  namespace: infra
spec:
  rules:
  - host: prometheus.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: prometheus
          servicePort: 9090

 

Copy the certificates

On host 200:

Create the directories that Prometheus mounts from NFS

# mkdir -p /data/nfs-volume/prometheus/{etc,prom-db}

Copy the certificates referenced by the configuration file

# cd /data/nfs-volume/prometheus/etc/
# cp /opt/certs/ca.pem ./

# cp /opt/certs/client.pem ./

# cp /opt/certs/client-key.pem ./

 

 

Prepare the Prometheus configuration file

On the ops host hdss7-200.host.com:

Edit the Prometheus configuration file (don't ask why every line is written this way; it is standard service-discovery boilerplate):

# vi /data/nfs-volume/prometheus/etc/prometheus.yml

global:
  scrape_interval: 15s
  evaluation_interval: 15s
scrape_configs:
- job_name: 'etcd'
  tls_config:
    ca_file: /data/etc/ca.pem
    cert_file: /data/etc/client.pem
    key_file: /data/etc/client-key.pem
  scheme: https
  static_configs:
  - targets:
    - '10.4.7.12:2379'
    - '10.4.7.21:2379'
    - '10.4.7.22:2379'
- job_name: 'kubernetes-apiservers'
  kubernetes_sd_configs:
  - role: endpoints
  scheme: https
  tls_config:
    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  relabel_configs:
  - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
    action: keep
    regex: default;kubernetes;https
- job_name: 'kubernetes-pods'
  kubernetes_sd_configs:
  - role: pod
  relabel_configs:
  - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
    action: keep
    regex: true
  - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
    action: replace
    target_label: __metrics_path__
    regex: (.+)
  - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
    action: replace
    regex: ([^:]+)(?::\d+)?;(\d+)
    replacement: $1:$2
    target_label: __address__
  - action: labelmap
    regex: __meta_kubernetes_pod_label_(.+)
  - source_labels: [__meta_kubernetes_namespace]
    action: replace
    target_label: kubernetes_namespace
  - source_labels: [__meta_kubernetes_pod_name]
    action: replace
    target_label: kubernetes_pod_name
- job_name: 'kubernetes-kubelet'
  kubernetes_sd_configs:
  - role: node
  relabel_configs:
  - action: labelmap
    regex: __meta_kubernetes_node_label_(.+)
  - source_labels: [__meta_kubernetes_node_name]
    regex: (.+)
    target_label: __address__
    replacement: ${1}:10255
- job_name: 'kubernetes-cadvisor'
  kubernetes_sd_configs:
  - role: node
  relabel_configs:
  - action: labelmap
    regex: __meta_kubernetes_node_label_(.+)
  - source_labels: [__meta_kubernetes_node_name]
    regex: (.+)
    target_label: __address__
    replacement: ${1}:4194
- job_name: 'kubernetes-kube-state'
  kubernetes_sd_configs:
  - role: pod
  relabel_configs:
  - action: labelmap
    regex: __meta_kubernetes_pod_label_(.+)
  - source_labels: [__meta_kubernetes_namespace]
    action: replace
    target_label: kubernetes_namespace
  - source_labels: [__meta_kubernetes_pod_name]
    action: replace
    target_label: kubernetes_pod_name
  - source_labels: [__meta_kubernetes_pod_label_grafanak8sapp]
    regex: .*true.*
    action: keep
  - source_labels: ['__meta_kubernetes_pod_label_daemon', '__meta_kubernetes_pod_node_name']
    regex: 'node-exporter;(.*)'
    action: replace
    target_label: nodename
- job_name: 'blackbox_http_pod_probe'
  metrics_path: /probe
  kubernetes_sd_configs:
  - role: pod
  params:
    module: [http_2xx]
  relabel_configs:
  - source_labels: [__meta_kubernetes_pod_annotation_blackbox_scheme]
    action: keep
    regex: http
  - source_labels: [__address__, __meta_kubernetes_pod_annotation_blackbox_port, __meta_kubernetes_pod_annotation_blackbox_path]
    action: replace
    regex: ([^:]+)(?::\d+)?;(\d+);(.+)
    replacement: $1:$2$3
    target_label: __param_target
  - action: replace
    target_label: __address__
    replacement: blackbox-exporter.kube-system:9115
  - source_labels: [__param_target]
    target_label: instance
  - action: labelmap
    regex: __meta_kubernetes_pod_label_(.+)
  - source_labels: [__meta_kubernetes_namespace]
    action: replace
    target_label: kubernetes_namespace
  - source_labels: [__meta_kubernetes_pod_name]
    action: replace
    target_label: kubernetes_pod_name
- job_name: 'blackbox_tcp_pod_probe'
  metrics_path: /probe
  kubernetes_sd_configs:
  - role: pod
  params:
    module: [tcp_connect]
  relabel_configs:
  - source_labels: [__meta_kubernetes_pod_annotation_blackbox_scheme]
    action: keep
    regex: tcp
  - source_labels: [__address__, __meta_kubernetes_pod_annotation_blackbox_port]
    action: replace
    regex: ([^:]+)(?::\d+)?;(\d+)
    replacement: $1:$2
    target_label: __param_target
  - action: replace
    target_label: __address__
    replacement: blackbox-exporter.kube-system:9115
  - source_labels: [__param_target]
    target_label: instance
  - action: labelmap
    regex: __meta_kubernetes_pod_label_(.+)
  - source_labels: [__meta_kubernetes_namespace]
    action: replace
    target_label: kubernetes_namespace
  - source_labels: [__meta_kubernetes_pod_name]
    action: replace
    target_label: kubernetes_pod_name
- job_name: 'traefik'
  kubernetes_sd_configs:
  - role: pod
  relabel_configs:
  - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme]
    action: keep
    regex: traefik
  - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
    action: replace
    target_label: __metrics_path__
    regex: (.+)
  - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
    action: replace
    regex: ([^:]+)(?::\d+)?;(\d+)
    replacement: $1:$2
    target_label: __address__
  - action: labelmap
    regex: __meta_kubernetes_pod_label_(.+)
  - source_labels: [__meta_kubernetes_namespace]
    action: replace
    target_label: kubernetes_namespace
  - source_labels: [__meta_kubernetes_pod_name]
    action: replace
    target_label: kubernetes_pod_name

 

 

 

 

Add the DNS record

[root@hdss7-11 ~]# vi /var/named/od.com.zone

prometheus A 10.4.7.10

 

[root@hdss7-11 ~]# systemctl restart named

[root@hdss7-11 ~]# dig -t A prometheus.od.com @10.4.7.11 +short

10.4.7.10

 

Apply the resource manifests

 

# kubectl apply -f http://k8s-yaml.od.com/prometheus/rbac.yaml

# kubectl apply -f http://k8s-yaml.od.com/prometheus/dp.yaml

# kubectl apply -f http://k8s-yaml.od.com/prometheus/svc.yaml

# kubectl apply -f http://k8s-yaml.od.com/prometheus/ingress.yaml

 

 

 

 

 

Check:

[root@hdss7-21 ~]# kubectl logs prometheus-7f656dbdcd-svm76 -n infra

 

 

Verify in a browser: prometheus.od.com

Click Status → Targets; the page lists the job_names configured in prometheus.yml, and these targets basically cover our data-collection needs.

 

 

Configuring Prometheus

Configuration

The scrape interval decides how often Prometheus polls each exporter and pulls back the Kubernetes cluster information.

The keep action keeps only the targets matched by the regex.

The drop action discards targets that have no matching label or do not match the regex. A small relabeling sketch follows.
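A minimal relabel_configs sketch contrasting keep and drop; the annotation follows the prometheus_io_scrape convention already used in this document, and the namespace value is only an example:

relabel_configs:
# keep: only Pods annotated prometheus_io_scrape: "true" remain as targets
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
  action: keep
  regex: true
# drop: discard any target whose namespace is kube-system
- source_labels: [__meta_kubernetes_namespace]
  action: drop
  regex: kube-system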

 

 

 

PromQL function usage

 

 

 

 

 

 

Getting Pods matched by Prometheus monitoring

Matching Traefik for monitoring

Edit the Traefik YAML

In the dashboard, open the Traefik YAML and add annotations at the same level as labels:

"annotations": {
  "prometheus_io_scheme": "traefik",
  "prometheus_io_path": "/metrics",
  "prometheus_io_port": "8080"
}

Note the comma that has to precede the new block.

Template → metadata → annotations

 

After the Pods restart, check Prometheus again:

[root@hdss7-21 ~]# kubectl delete pod traefik-ingress-9jcr9 -n kube-system

[root@hdss7-21 ~]# kubectl delete pod traefik-ingress-wb7cs -n kube-system

The targets are discovered automatically.

Monitoring dubbo-service

# With the annotations in place, restart the Pod and it shows up in monitoring.

 

 

 

 

The endpoint shown uses the Service name.

Blackbox-exporter sits between the Pods and Prometheus as a middleman; Prometheus pulls the probe results from blackbox-exporter.

 

 

 

 

 

 

 

blackbox

This checks the liveness of services inside containers, i.e. port health checks, split into TCP and HTTP.

First prepare the two services: switch both dubbo-demo-service and dubbo-demo-consumer to the master images that do not depend on Apollo (to save resources).

Once both services are up, first add a TCP annotation to the dubbo-demo-service resource:

"annotations": {
  "blackbox_port": "20880",
  "blackbox_scheme": "tcp"
}

Prometheus will then automatically discover the service listening on TCP port 20880 and monitor its state.

   

Monitoring dubbo-demo-consumer

Next, add an HTTP annotation to the dubbo-demo-consumer resource:

"annotations": {
  "blackbox_path": "/hello?name=health",
  "blackbox_port": "8080",
  "blackbox_scheme": "http"
}

# the health-check path must be correct

 

 

   

Check blackbox.od.com:

http://blackbox.od.com/

   

   

Add the annotations for JVM monitoring

"annotations": {
  "prometheus_io_scrape": "true",
  "prometheus_io_port": "12346",
  "prometheus_io_path": "/"
}

Add them to both dubbo-demo-service and dubbo-demo-consumer:

   

 

   

   

   

The matching rules are defined in prometheus.yml.

   

   

 

 

 

 

 

4. Deploying Grafana, the cloud monitoring dashboard platform

 

Pull and push the image:

# docker pull grafana/grafana:5.4.2

# docker tag 6f18ddf9e552 harbor.od.com/infra/grafana:v5.4.2

# docker push harbor.od.com/infra/grafana:v5.4.2

 

 

Prepare the resource manifests:

Create the data directory

# mkdir /data/nfs-volume/grafana

cd /data/k8s-yaml/grafana

1. rbac.yaml

vi rbac.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: grafana
rules:
- apiGroups:
  - "*"
  resources:
  - namespaces
  - deployments
  - pods
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: grafana
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: grafana
subjects:
- kind: User
  name: k8s-node

 

2. dp.yaml

vi dp.yaml

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: grafana
    name: grafana
  name: grafana
  namespace: infra
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 7
  selector:
    matchLabels:
      name: grafana
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: grafana
        name: grafana
    spec:
      containers:
      - name: grafana
        image: harbor.od.com/infra/grafana:v5.4.2
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /var/lib/grafana
          name: data
      imagePullSecrets:
      - name: harbor
      securityContext:
        runAsUser: 0
      volumes:
      - nfs:
          server: hdss7-200
          path: /data/nfs-volume/grafana
        name: data

 

 

3. svc.yaml

vi svc.yaml

apiVersion: v1
kind: Service
metadata:
  name: grafana
  namespace: infra
spec:
  ports:
  - port: 3000
    protocol: TCP
    targetPort: 3000
  selector:
    app: grafana

 

4. ingress.yaml

vi ingress.yaml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: grafana
  namespace: infra
spec:
  rules:
  - host: grafana.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: grafana
          servicePort: 3000

 

 

Add the DNS record

[root@hdss7-11 ~]# vi /var/named/od.com.zone

grafana A 10.4.7.10

 

[root@hdss7-11 ~]# systemctl restart named

 

 

Apply the resource manifests

# kubectl apply -f http://k8s-yaml.od.com/grafana/rbac.yaml

# kubectl apply -f http://k8s-yaml.od.com/grafana/dp.yaml

# kubectl apply -f http://k8s-yaml.od.com/grafana/svc.yaml

# kubectl apply -f http://k8s-yaml.od.com/grafana/ingress.yaml

 

 

5. Grafana configuration and plugin dashboard building

Settings

Install plugins

Exec into the container to install the plugins:

Installation is a bit slow.

# kubectl exec -it grafana-d6588db94-xr4s6 /bin/bash -n infra

 

grafana-cli plugins install grafana-kubernetes-app

 

grafana-cli plugins install grafana-clock-panel

 

grafana-cli plugins install grafana-piechart-panel

 

grafana-cli plugins install briangann-gauge-panel

 

grafana-cli plugins install natel-discrete-panel

 

 

 

 

 

 

 

 

 

Delete the Grafana Pod so that it restarts with the plugins loaded

[root@hdss7-21 ~]# kubectl delete pod grafana-d6588db94-7c66l -n infra

 

 

Add the Prometheus data source

 

 

 

 

Add the data source

Prepare the certificates

[root@hdss7-200 certs]# cat /opt/certs/ca.pem

[root@hdss7-200 certs]# cat /opt/certs/client.pem

[root@hdss7-200 certs]# cat /opt/certs/client-key.pem

 

 

 

 

Configure the Kubernetes plugin

 

 

 

 

 

 

K8s plugin configuration

Port 7443 is proxied to the apiserver port 6443 on nodes 21 and 22.

After adding the cluster you need to wait a few minutes; until data has been fetched it reports "http forbidden", which is harmless and clears up on its own after roughly 2-5 minutes.

 

 

 

The Container panel shows no data; fix the query

Change:

sum(container_memory_usage_bytes{container_label_io_kubernetes_pod_name=~"$pod"}) by (pod_name)

to:

sum(container_memory_usage_bytes{container_label_io_kubernetes_pod_name=~"$pod"}) by (container_label_io_kubernetes_pod_name)

Delete and recreate the grafana-demo dashboard.

 

 

Load the other dashboards

 

 

 

 

 

 

 

 


 

 

 

 

 

 

 

 

Monitoring dubbo JVM

Once the annotations are added, the Pods are pulled into monitoring automatically, and the Grafana dashboards then query Prometheus for the data to display; a couple of example queries follow.
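For reference, example JVM queries of the kind such a dashboard might use; the metric names come from the standard JMX exporter and are an assumption about what the dubbo images expose:

# JVM heap usage per Pod (kubernetes_pod_name is added by the relabeling rules in prometheus.yml)
sum(jvm_memory_bytes_used{area="heap"}) by (kubernetes_pod_name)

# current number of live JVM threads
jvm_threads_current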

 

 

6. How microservice containers hook into the container cloud monitoring

The content is covered in part 5 above and was not split out separately.

 

 

 

 

 

 

 

 

 

 

 

7. Monitoring alerts with the Alertmanager component

Set up the alerting component:

# docker pull docker.io/prom/alertmanager:v0.14.0

# docker tag 23744b2d645c harbor.od.com/infra/alertmanager:v0.14.0

# docker push harbor.od.com/infra/alertmanager:v0.14.0

 

Resource manifests:

mkdir /data/k8s-yaml/alertmanager

1. cm.yaml

vi cm.yaml

apiVersion: v1
kind: ConfigMap
metadata:
  name: alertmanager-config
  namespace: infra
data:
  config.yml: |-
    global:
      # how long to wait before declaring an alert resolved once it stops firing
      resolve_timeout: 5m
      # e-mail sending settings
      smtp_smarthost: 'smtp.qq.com'
      smtp_from: '[email protected]'
      smtp_auth_username: '[email protected]'
      smtp_auth_password: 'bdieyxrflckobcag'
      smtp_require_tls: false
    # the root route that every incoming alert enters; it defines how alerts are dispatched
    route:
      # labels used to regroup incoming alerts; for example, alerts carrying cluster=A and alertname=LatencyHigh are aggregated into one group
      group_by: ['alertname', 'cluster']
      # after a new alert group is created, wait at least group_wait before the first notification, so several alerts of the same group can be sent together
      group_wait: 30s
      # after the first notification, wait group_interval before sending notifications for new alerts added to the group
      group_interval: 5m
      # if an alert has already been sent successfully, wait repeat_interval before sending it again
      repeat_interval: 5m
      # default receiver: alerts that match no route are sent here
      receiver: default
    receivers:
    - name: 'default'
      email_configs:
      - to: '[email protected]'
        send_resolved: true

 

 

 

2. dp.yaml

vi dp.yaml

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: alertmanager
  namespace: infra
spec:
  replicas: 1
  selector:
    matchLabels:
      app: alertmanager
  template:
    metadata:
      labels:
        app: alertmanager
    spec:
      containers:
      - name: alertmanager
        image: harbor.od.com/infra/alertmanager:v0.14.0
        args:
        - "--config.file=/etc/alertmanager/config.yml"
        - "--storage.path=/alertmanager"
        ports:
        - name: alertmanager
          containerPort: 9093
        volumeMounts:
        - name: alertmanager-cm
          mountPath: /etc/alertmanager
      volumes:
      - name: alertmanager-cm
        configMap:
          name: alertmanager-config
      imagePullSecrets:
      - name: harbor

 

 

3. svc.yaml

vi svc.yaml

apiVersion: v1
kind: Service
metadata:
  name: alertmanager
  namespace: infra
spec:
  selector:
    app: alertmanager
  ports:
  - port: 80
    targetPort: 9093

 

Apply the resource manifests

kubectl apply -f http://k8s-yaml.od.com/alertmanager/cm.yaml

kubectl apply -f http://k8s-yaml.od.com/alertmanager/dp.yaml

kubectl apply -f http://k8s-yaml.od.com/alertmanager/svc.yaml

 

 

 

 

 

 

Configure basic alerting rules

vi /data/nfs-volume/prometheus/etc/rules.yml

 

groups:
- name: hostStatsAlert
  rules:
  - alert: hostCpuUsageAlert
    expr: sum(avg without (cpu)(irate(node_cpu{mode!='idle'}[5m]))) by (instance) > 0.85
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "{{ $labels.instance }} CPU usage above 85% (current value: {{ $value }}%)"
  - alert: hostMemUsageAlert
    expr: (node_memory_MemTotal - node_memory_MemAvailable)/node_memory_MemTotal > 0.85
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "{{ $labels.instance }} MEM usage above 85% (current value: {{ $value }}%)"
  - alert: OutOfInodes
    expr: node_filesystem_free{fstype="overlay",mountpoint ="/"} / node_filesystem_size{fstype="overlay",mountpoint ="/"} * 100 < 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Out of inodes (instance {{ $labels.instance }})"
      description: "Disk is almost running out of available inodes (< 10% left) (current value: {{ $value }})"
  - alert: OutOfDiskSpace
    expr: node_filesystem_free{fstype="overlay",mountpoint ="/rootfs"} / node_filesystem_size{fstype="overlay",mountpoint ="/rootfs"} * 100 < 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Out of disk space (instance {{ $labels.instance }})"
      description: "Disk is almost full (< 10% left) (current value: {{ $value }})"
  - alert: UnusualNetworkThroughputIn
    expr: sum by (instance) (irate(node_network_receive_bytes[2m])) / 1024 / 1024 > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual network throughput in (instance {{ $labels.instance }})"
      description: "Host network interfaces are probably receiving too much data (> 100 MB/s) (current value: {{ $value }})"
  - alert: UnusualNetworkThroughputOut
    expr: sum by (instance) (irate(node_network_transmit_bytes[2m])) / 1024 / 1024 > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual network throughput out (instance {{ $labels.instance }})"
      description: "Host network interfaces are probably sending too much data (> 100 MB/s) (current value: {{ $value }})"
  - alert: UnusualDiskReadRate
    expr: sum by (instance) (irate(node_disk_bytes_read[2m])) / 1024 / 1024 > 50
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual disk read rate (instance {{ $labels.instance }})"
      description: "Disk is probably reading too much data (> 50 MB/s) (current value: {{ $value }})"
  - alert: UnusualDiskWriteRate
    expr: sum by (instance) (irate(node_disk_bytes_written[2m])) / 1024 / 1024 > 50
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual disk write rate (instance {{ $labels.instance }})"
      description: "Disk is probably writing too much data (> 50 MB/s) (current value: {{ $value }})"
  - alert: UnusualDiskReadLatency
    expr: rate(node_disk_read_time_ms[1m]) / rate(node_disk_reads_completed[1m]) > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual disk read latency (instance {{ $labels.instance }})"
      description: "Disk latency is growing (read operations > 100ms) (current value: {{ $value }})"
  - alert: UnusualDiskWriteLatency
    expr: rate(node_disk_write_time_ms[1m]) / rate(node_disk_writes_completed[1m]) > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual disk write latency (instance {{ $labels.instance }})"
      description: "Disk latency is growing (write operations > 100ms) (current value: {{ $value }})"
- name: http_status
  rules:
  - alert: ProbeFailed
    expr: probe_success == 0
    for: 1m
    labels:
      severity: error
    annotations:
      summary: "Probe failed (instance {{ $labels.instance }})"
      description: "Probe failed (current value: {{ $value }})"
  - alert: StatusCode
    expr: probe_http_status_code <= 199 OR probe_http_status_code >= 400
    for: 1m
    labels:
      severity: error
    annotations:
      summary: "Status Code (instance {{ $labels.instance }})"
      description: "HTTP status code is not 200-399 (current value: {{ $value }})"
  - alert: SslCertificateWillExpireSoon
    expr: probe_ssl_earliest_cert_expiry - time() < 86400 * 30
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "SSL certificate will expire soon (instance {{ $labels.instance }})"
      description: "SSL certificate expires in 30 days (current value: {{ $value }})"
  - alert: SslCertificateHasExpired
    expr: probe_ssl_earliest_cert_expiry - time() <= 0
    for: 5m
    labels:
      severity: error
    annotations:
      summary: "SSL certificate has expired (instance {{ $labels.instance }})"
      description: "SSL certificate has expired already (current value: {{ $value }})"
  - alert: BlackboxSlowPing
    expr: probe_icmp_duration_seconds > 2
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Blackbox slow ping (instance {{ $labels.instance }})"
      description: "Blackbox ping took more than 2s (current value: {{ $value }})"
  - alert: BlackboxSlowRequests
    expr: probe_http_duration_seconds > 2
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Blackbox slow requests (instance {{ $labels.instance }})"
      description: "Blackbox request took more than 2s (current value: {{ $value }})"
  - alert: PodCpuUsagePercent
    expr: sum(sum(label_replace(irate(container_cpu_usage_seconds_total[1m]),"pod","$1","container_label_io_kubernetes_pod_name", "(.*)"))by(pod) / on(pod) group_right kube_pod_container_resource_limits_cpu_cores *100 )by(container,namespace,node,pod,severity) > 80
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Pod cpu usage percent has exceeded 80% (current value: {{ $value }}%)"

 

Add the alerting configuration to prometheus.yml

[root@hdss7-200 alertmanager]# vi /data/nfs-volume/prometheus/etc/prometheus.yml

alerting:
  alertmanagers:
  - static_configs:
    - targets: ["alertmanager"]
rule_files:
- "/data/etc/rules.yml"

Gracefully reload Prometheus

# reload Prometheus after changing the configuration file

# on node 21 (the node where the Prometheus Pod runs)

[root@hdss7-21 ~]# ps aux|grep prometheus

[root@hdss7-21 ~]# kill -SIGHUP 3441 # Prometheus reloads its configuration when it receives this signal

 

Reload the configuration:

# curl -X POST http://prometheus.od.com/-/reload

The rules above are our alerting rules.

Test an alert:

Stop dubbo-demo-service in the app namespace:

Check the information in blackbox:

Check the alerts:

When an alert turns red, the e-mail notification goes out:

The alert mail has been received; for production, later notes will cover adding WeChat, DingTalk and SMS notifications.

If you want to customize alert rules and alert content, study PromQL a bit and edit the configuration files yourself; a small sketch follows.
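As a sketch of what such a custom rule could look like, reusing node-exporter metrics already scraped in this setup (the rule name and threshold are made up for illustration):

groups:
- name: customDemoAlert
  rules:
  - alert: NodeRootDiskAlmostFull   # hypothetical rule name
    expr: node_filesystem_free{mountpoint="/"} / node_filesystem_size{mountpoint="/"} * 100 < 20
    for: 10m
    labels:
      severity: warning
    annotations:
      summary: "Root filesystem below 20% free on {{ $labels.instance }}"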

   

8. Course summary

How the pieces fit together

Exporters

Four kinds, implementing different functions.

Prometheus server

Retrieval (the data collector) → pulls data from the exporters → stores it in the TSDB (time-series database).

Configuration: static configuration, dynamic service discovery, file-based service discovery.

HTTP server: the web interface that Prometheus provides.