|NO.Z.00229|——————————|^^ 部署 ^^|——|KuberNetes&細粒度許可權控制.V13|------------------------------------------------|Ratel.v01|k8s資源管理平臺部署|
阿新 • • 發佈:2022-03-31
[CloudNative:KuberNetes&細粒度許可權控制.V13] [Applications.KuberNetes] [|DevOps|k8s|細粒度許可權控制|安裝一鍵式k8s資源平臺Ratel到k8s叢集|]
一、k8s資源管理平臺Ratel
### --- ratel官方hub地址:
~~~ https://github.com/dotbalo/ratel-doc
二、安裝Ratel
### --- 建立secret
~~~ # 安裝Ratel
[root@k8s-master01 ~]# mkdir ratel
[root@k8s-master01 ~]# cd ratel
### --- 建立servers.yaml檔案
[root@k8s-master01 ratel]# vim servers.yaml
- serverName: 'test1'
  serverAddress: 'https://192.168.1.20:8443'
  #serverAdminUser: 'xxx'
  #serverAdminPassword: 'xxx#'
  serverAdminToken: 'null'
  serverDashboardUrl: "https://k8s.test1.com.cn/#"
  production: 'false'
  kubeConfigPath: "/mnt/test1.config"
~~~ 注:
- serverName: 'test1'
  serverAddress: 'https://192.168.1.20:8443'
  # 因為沒有配置賬號密碼,也沒有使用token,直接配置kubeConfigPath即可
  #serverAdminUser: 'xxx'
  #serverAdminPassword: 'xxx#'
  serverAdminToken: 'null'
  serverDashboardUrl: "https://k8s.test1.com.cn/#"    # 該工具會使用到原生的dashboard,可以配置上該地址
  production: 'false'
  kubeConfigPath: "/mnt/test1.config"    # 因為我們是基於kubeconfig這個檔案去訪問的,所以只需要配置這個檔案即可
~~~ # 查詢serveraddress的地址
[root@k8s-master01 ~]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.1.20:8443
CoreDNS is running at https://192.168.1.20:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
### --- kube/config檔案copy到當前目錄並定義為test1.config
~~~ 這個檔名字和kubeConfigPath:檔名保持一致
[root@k8s-master01 ratel]# cp /root/.kube/config test1.config
### --- 建立一個secret
~~~ 多個叢集的話可以加入進去,這個可以動態加入的
~~~ 注:建立的這個secret會掛載到這個deployment的檔案中,
~~~ 這個deployment就可以讀取到這個secret,
~~~ 啟動之後就可以把這個叢集給掛載進去
[root@k8s-master01 ratel]# kubectl create secret generic ratel-config --from-file=test1.config --from-file=servers.yaml -n kube-system
secret/ratel-config created
三、建立RBAC
### --- 建立許可權管理namespace
[root@k8s-master01 ratel]# kubectl create ns kube-users
namespace/kube-users created
### --- 然後新增如下的ClusterroleBinding
[root@k8s-master01 ratel]# vim ratel-rbac.yaml
# ratel-rbac.yaml — a v1 List bundling five ClusterRoles that Ratel later binds
# to users/groups for fine-grained access. Indentation reconstructed: the
# original export flattened every line to column 0, which is not valid YAML.
apiVersion: v1
items:
# Read-only access to namespaces plus pod metrics; aggregated into the
# built-in "edit" role via the aggregate-to-edit label.
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    annotations:
      rbac.authorization.kubernetes.io/autoupdate: "true"
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
      rbac.authorization.k8s.io/aggregate-to-edit: "true"
    name: ratel-namespace-readonly
  rules:
  - apiGroups:
    - ""
    resources:
    - namespaces
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - metrics.k8s.io
    resources:
    - pods
    verbs:
    - get
    - list
    - watch
# Allows listing and deleting pods (e.g. to bounce a workload).
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    name: ratel-pod-delete
  rules:
  - apiGroups:
    - ""
    resources:
    - pods
    verbs:
    - get
    - list
    - delete
# Allows reading pod logs and opening an exec session into containers.
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    name: ratel-pod-exec
  rules:
  - apiGroups:
    - ""
    resources:
    - pods
    - pods/log
    verbs:
    - get
    - list
  - apiGroups:
    - ""
    resources:
    - pods/exec
    verbs:
    - create
# Write (patch/update) access to the common workload and networking resources.
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    annotations:
      rbac.authorization.kubernetes.io/autoupdate: "true"
    name: ratel-resource-edit
  rules:
  - apiGroups:
    - ""
    resources:
    - configmaps
    - persistentvolumeclaims
    - services
    - services/proxy
    verbs:
    - patch
    - update
  - apiGroups:
    - apps
    resources:
    - daemonsets
    - deployments
    - deployments/rollback
    - deployments/scale
    - statefulsets
    - statefulsets/scale
    verbs:
    - patch
    - update
  - apiGroups:
    - autoscaling
    resources:
    - horizontalpodautoscalers
    verbs:
    - patch
    - update
  - apiGroups:
    - batch
    resources:
    - cronjobs
    - jobs
    verbs:
    - patch
    - update
  - apiGroups:
    - extensions
    resources:
    - daemonsets
    - deployments
    - deployments/rollback
    - deployments/scale
    - ingresses
    - networkpolicies
    verbs:
    - patch
    - update
  - apiGroups:
    - networking.k8s.io
    resources:
    - ingresses
    - networkpolicies
    verbs:
    - patch
    - update
# Read-only (get/list/watch) access to the same resource families.
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    name: ratel-resource-readonly
  rules:
  - apiGroups:
    - ""
    resources:
    - configmaps
    - endpoints
    - persistentvolumeclaims
    - pods
    - replicationcontrollers
    - replicationcontrollers/scale
    - serviceaccounts
    - services
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - ""
    resources:
    - bindings
    - events
    - limitranges
    - namespaces/status
    - pods/log
    - pods/status
    - replicationcontrollers/status
    - resourcequotas
    - resourcequotas/status
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - ""
    resources:
    - namespaces
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - apps
    resources:
    - controllerrevisions
    - daemonsets
    - deployments
    - deployments/scale
    - replicasets
    - replicasets/scale
    - statefulsets
    - statefulsets/scale
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - autoscaling
    resources:
    - horizontalpodautoscalers
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - batch
    resources:
    - cronjobs
    - jobs
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - extensions
    resources:
    - daemonsets
    - deployments
    - deployments/scale
    - ingresses
    - networkpolicies
    - replicasets
    - replicasets/scale
    - replicationcontrollers/scale
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - policy
    resources:
    - poddisruptionbudgets
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - networking.k8s.io
    resources:
    - networkpolicies
    - ingresses
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - metrics.k8s.io
    resources:
    - pods
    verbs:
    - get
    - list
    - watch
kind: List
metadata:
  resourceVersion: ""
  selfLink: ""
### --- 建立ratel.rbac
[root@k8s-master01 ratel]# kubectl create -f ratel-rbac.yaml
clusterrole.rbac.authorization.k8s.io/ratel-namespace-readonly created
clusterrole.rbac.authorization.k8s.io/ratel-pod-delete created
clusterrole.rbac.authorization.k8s.io/ratel-pod-exec created
clusterrole.rbac.authorization.k8s.io/ratel-resource-edit created
clusterrole.rbac.authorization.k8s.io/ratel-resource-readonly created
### --- 建立ratel-rbac-binding
[root@k8s-master01 ratel]# vim ratel-rbac-binding.yaml
# ratel-rbac-binding.yaml — grants every ServiceAccount in the kube-users
# namespace the ratel-namespace-readonly ClusterRole.
# Indentation reconstructed: the export flattened the manifest to column 0.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ratel-namespace-readonly-sa
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ratel-namespace-readonly
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  # the implicit group containing all ServiceAccounts of namespace kube-users
  name: system:serviceaccounts:kube-users
[root@k8s-master01 ratel]# kubectl create -f ratel-rbac-binding.yaml
clusterrolebinding.rbac.authorization.k8s.io/ratel-namespace-readonly-sa created
四、部署Ratel
### --- 建立yaml檔案
~~~ # 需要更改的內容如下:
~~~ ProRunMode: 區別在於dev模式列印的是debug日誌, 其他模式是info級別的日誌, 實際使用時應該配置為非dev
~~~ ADMIN_USERNAME: ratel自己的管理員賬號
~~~ ADMIN_PASSWORD: ratel自己的管理員密碼
~~~ 實際使用時賬號密碼應滿足複雜性要求,因為ratel可以直接操作所有配置的資源。
~~~ 其他無需配置, 埠配置暫不支援。
[root@k8s-master01 ratel]# vim ratel.yaml
# ratel.yaml — Deployment running the Ratel web UI in kube-system.
# The ratel-config Secret (servers.yaml + per-cluster kubeconfigs) is mounted
# at /mnt, where the startup command expects it.
# Indentation reconstructed: the export flattened the manifest to column 0.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: ratel
  name: ratel
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ratel
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: ratel
    spec:
      containers:
      - command:
        - sh
        - -c
        - ./ratel -c /mnt/servers.yaml # point ratel at the mounted servers.yaml on startup
        env:
        - name: TZ
          value: Asia/Shanghai
        - name: LANG
          value: C.UTF-8
        - name: ProRunMode # any non-dev value logs at info level; dev logs debug
          value: prod
        - name: ADMIN_USERNAME # ratel's own administrator account
          value: admin
        - name: ADMIN_PASSWORD # ratel's own administrator password
          # NOTE(review): weak demo credential — use a strong password in real
          # deployments; ratel can operate every configured cluster resource.
          value: password
        image: registry.cn-beijing.aliyuncs.com/dotbalo/ratel:latest
        imagePullPolicy: Always # upstream pushes updates to this tag, so always re-pull
        livenessProbe:
          failureThreshold: 2
          initialDelaySeconds: 10
          periodSeconds: 60
          successThreshold: 1
          tcpSocket:
            port: 8888
          timeoutSeconds: 2
        name: ratel
        ports:
        - containerPort: 8888
          name: web
          protocol: TCP
        readinessProbe:
          failureThreshold: 2
          initialDelaySeconds: 10
          periodSeconds: 60
          successThreshold: 1
          tcpSocket:
            port: 8888
          timeoutSeconds: 2
        resources:
          limits:
            cpu: 500m
            memory: 512Mi
          requests:
            cpu: 500m
            memory: 512Mi
        volumeMounts:
        - mountPath: /mnt # servers.yaml and the kubeconfig files from the secret appear here
          name: ratel-config
      dnsPolicy: ClusterFirst
      # imagePullSecrets: # not needed for this public registry; uncomment for a private one
      # - name: myregistrykey
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - name: ratel-config
        secret:
          defaultMode: 420
          secretName: ratel-config
### --- 建立Ratel
[root@k8s-master01 ratel]# kubectl create -f ratel.yaml -n kube-system
deployment.apps/ratel created
[root@k8s-master01 ratel]# kubectl get po -n kube-system
NAME READY STATUS RESTARTS AGE
ratel-85bd5d5697-2h65z 0/1 ContainerCreating 0 30s
~~~ # 拉取映象階段
[root@k8s-master01 ratel]# kubectl get po -n kube-system -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
ratel-85bd5d5697-2h65z 0/1 Running 0 77s 172.25.244.202 k8s-master01 <none> <none>
五、service和Ingress配置
~~~ # 注意:如果沒有安裝ingress controller,
~~~ 需要把type: ClusterIP改成type: NodePort,然後通過主機IP+Port進行訪問
### --- 編寫service和ingress的yaml檔案
[root@k8s-master01 Ratel]# vim ratel-svc.yaml
# ratel-svc.yaml — ClusterIP Service plus Ingress exposing the Ratel UI.
# Indentation reconstructed (export flattened the manifest), and the Ingress
# upgraded from extensions/v1beta1 — removed in Kubernetes v1.22 — to
# networking.k8s.io/v1, which matches the API group shown by the
# `kubectl get ingress` output later in this document.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: ratel
  name: ratel
  namespace: kube-system
spec:
  ports:
  - name: container-1-web-1
    port: 8888
    protocol: TCP
    targetPort: 8888
  selector:
    app: ratel
  type: ClusterIP # change to NodePort if no ingress controller is installed
--- # multiple documents in one file are separated by ---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ratel
  namespace: kube-system
spec:
  rules:
  - host: krm.test.com
    http:
      paths:
      - path: /
        pathType: Prefix # v1 requires an explicit pathType
        backend:
          service:
            name: ratel
            port:
              number: 8888
### --- 建立service和ingress
[root@k8s-master01 ratel]# kubectl create -f ratel-svc.yaml -n kube-system
ingress.extensions/ratel created
### --- 檢視建立的資源
[root@k8s-master01 ratel]# kubectl get svc,ingress,po -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/ratel ClusterIP 10.105.168.163 <none> 8888/TCP 4m50s
NAME CLASS HOSTS ADDRESS PORTS AGE
ingress.networking.k8s.io/ratel <none> krm.test.com 10.107.59.89 80 4m49s
NAME READY STATUS RESTARTS AGE
pod/ratel-85bd5d5697-2h65z 1/1 Running 0 17m
===============================END===============================
Walter Savage Landor:strove with none,for none was worth my strife.Nature I loved and, next to Nature, Art:I warm'd both hands before the fire of life.It sinks, and I am ready to depart ——W.S.Landor
來自為知筆記(Wiz)