
Deploying Monstache to Kubernetes in a Basic High-Availability Setup

1. Create the configuration file and name it config.toml

# connection settings
# print detailed information including request traces
# enable verbose logging; keep this setting at the top of the file, otherwise log output may not reach the log files
verbose = true
# connect to MongoDB using the following URL
# MongoDB connection URL; you must have a MongoDB replica set (cluster) set up, since Monstache reads the oplog/change streams
mongo-url = "mongodb://192.168.7.51:27021" # change to your own
#"mongodb://root:<your_mongodb_password>@dds-bp1aadcc629******.mongodb.rds.aliyuncs.com:3717"
# connect to the Elasticsearch REST API at the following node URLs
# Elasticsearch connection URLs
elasticsearch-urls = ["http://localhost:9200"] # change to your own
 
# frequently required settings
# if you need to seed an index from a collection and not just listen and sync changes events
# you can copy entire collections or views from MongoDB to Elasticsearch
# MongoDB namespaces to copy, in the form database.collection
direct-read-namespaces = ["mssiot_forum_merossbeta.f_posts"]
 
# if you want to use MongoDB change streams instead of legacy oplog tailing use change-stream-namespaces
# change streams require at least MongoDB API 3.6+
# if you have MongoDB 4+ you can listen for changes to an entire database or entire deployment
# in this case you usually don't need regexes in your config to filter collections unless you target the deployment.
# to listen to an entire db use only the database name.  For a deployment use an empty string.
#change-stream-namespaces = ["mydb.col"]
 
# additional settings
 
# if you don't want to listen for changes to all collections in MongoDB but only a few
# e.g. only listen for inserts, updates, deletes, and drops from mydb.mycollection
# this setting does not initiate a copy, it is only a filter on the change event listener
#namespace-regex = '^mssiot_forum_merossbeta\.f_posts$'
# compress requests to Elasticsearch
#gzip = true
# generate indexing statistics
#stats = true
# index statistics into Elasticsearch
#index-stats = true
# use the following PEM file for connections to MongoDB
#mongo-pem-file = "/path/to/mongoCert.pem"
# disable PEM validation
#mongo-validate-pem-file = false
# use the following user name for Elasticsearch basic auth
elasticsearch-user = "elastic"
# use the following password for Elasticsearch basic auth
#elasticsearch-password = "<your_es_password>"
# use 8 go routines concurrently pushing documents to Elasticsearch
# maximum number of concurrent goroutines Monstache uses to push to Elasticsearch; the default is 4
elasticsearch-max-conns = 8
# use the following PEM file for connections to Elasticsearch
#elasticsearch-pem-file = "/path/to/elasticCert.pem"
# validate connections to Elasticsearch
#elastic-validate-pem-file = true
# propagate dropped collections in MongoDB as index deletes in Elasticsearch
# whether dropping a collection or database in MongoDB also deletes the corresponding index in Elasticsearch
dropped-collections = false
# propagate dropped databases in MongoDB as index deletes in Elasticsearch
dropped-databases = false
# do not start processing at the beginning of the MongoDB oplog
# if you set the replay to true you may see version conflict messages
# in the log if you had synced previously. This just means that you are replaying old docs which are already
# in Elasticsearch with a newer version. Elasticsearch is preventing the old docs from overwriting new ones.
#replay = false
# resume processing from a timestamp saved in a previous run
# resume from the timestamp saved in a previous run
resume = true
# do not validate that progress timestamps have been saved
#resume-write-unsafe = false
# override the name under which resume state is saved
#resume-name = "default"
# resume strategy: 0 (the default) resumes from timestamps and requires MongoDB 4.0+,
# 1 resumes from change stream tokens and works with MongoDB 3.6+
resume-strategy = 0
# exclude documents whose namespace matches the following pattern
#namespace-exclude-regex = '^mydb\.ignorecollection$'
# turn on indexing of GridFS file content
#index-files = true
# turn on search result highlighting of GridFS content
#file-highlighting = true
# index GridFS files inserted into the following collections
#file-namespaces = ["users.fs.files"]
# enable clustering mode
# Monstache cluster name; processes sharing this name form one cluster in which only one is active at a time, which is what provides high availability
cluster-name = 'merossdev'
# worker mode
#workers = ["Tom", "Dick", "Harry"]
# do not exit after full-sync, rather continue tailing the oplog
#exit-after-direct-reads = false
namespace-regex = '^mssiot_forum_merossbeta\.(f_posts|\$cmd)$'
 
[[mapping]]
namespace = "mssiot_forum_merossbeta.f_posts"
index = "f_posts"
 
# logging is essential in production; Monstache writes to stdout by default, and the [logs] section below redirects output to the specified log files (another pitfall discovered in practice)
#[logs]
#info = "/var/monstache/log/info.log"
#warn = "/var/monstache/log/warn.log"
#error = "/var/monstache/log/error.log"
#trace = "/var/monstache/log/trace.log"
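
Before uploading, you can sanity-check the file locally. This is a quick smoke test, assuming the monstache binary is installed on your workstation and the MongoDB/Elasticsearch endpoints in the config are reachable from it:

monstache -f config.toml

If the config parses and the connections succeed, Monstache starts tailing changes; stop it with Ctrl+C before moving on.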

2. Upload the config to an EKS ConfigMap

Run: kubectl create configmap monstache-config --from-file=config.toml -n mssiot (the ConfigMap must be created in the same namespace, mssiot, that the Deployment below uses)
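
To confirm the ConfigMap exists and contains the file (a quick check, assuming the mssiot namespace used by the Deployment below):

kubectl get configmap monstache-config -n mssiot
kubectl describe configmap monstache-config -n mssiot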

3. Create deployment.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: mssiot
  name: monstache
  labels:
    app: monstache
spec:
  replicas: 2
  selector:
    matchLabels:
      app: monstache
  template:
    metadata:
      namespace: mssiot
      labels:
        app: monstache
    spec:
      containers:
        - args:
          - -f
          - /user/local/config.toml
          - -cluster-name
          - merossdev
          volumeMounts:
            - name: monstache-config
              mountPath: /user/local
              readOnly: true
          name: monstache
          image: rwynn/monstache:6.7.6
          imagePullPolicy: Always
      volumes:
        - name: monstache-config
          configMap:
            name: monstache-config

4. Apply the deployment from the command line
kubectl create -f deployment.yaml
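
After applying, you can verify that both replicas are running and watch the logs. A quick check, using the namespace and label from the manifest above:

kubectl get pods -n mssiot -l app=monstache
kubectl logs -n mssiot deploy/monstache

With cluster-name set in the config, only one of the two pods actively processes events while the other waits as a hot standby and takes over if the active pod fails.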

5. The deployment is now complete.
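
As a final check, confirm that documents are reaching Elasticsearch, for example by counting documents in the target index. Substitute your own Elasticsearch URL and password; the index name f_posts assumes the [[mapping]] section above is in effect:

curl -u elastic:<your_es_password> http://localhost:9200/f_posts/_count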