1. 程式人生 > 實用技巧 >core dump分析

core dump分析

1 架構設計

對於量很大的資料,需要考慮使用中介軟體做緩衝層,然後用logstash做資料聚合和處理

CRUD

//Create a document; the _id is auto-generated
POST users/_doc
{
  "user": "Jack",
  "post_date": "20202010"
}

//Create a document with an explicit id; errors if the id already exists
PUT users/_doc/1?op_type=create
{
  "user": "Mike",
  "post_date": "20202011"
}

//Get and delete a document by its id
GET users/_doc/1
DELETE users/_doc/1

//Index a document; if the id already exists, the old document is removed and re-created, and _version is incremented
PUT users/_doc/1
{
  "user": "Mile"
}

//Partial update: add or overwrite the fields listed under "doc"
//(endpoint is _update/<id> with no trailing slash)
POST users/_update/1
{
  "doc":{
    "post_date": "20202010"
  }
}

//Bulk operations: index, create, and update actions must each be
//followed by a source line on the next line; delete takes no source line.
POST _bulk
{"index": { "_index": "test", "_id": "1" }}
{"user": "Jack"}
{"delete": { "_index": "test", "_id": "2" }}
{"create": { "_index": "test2", "_id": "3" }}
{"user": "Mike"}
{"update": { "_index": "test", "_id": "1" }}
{"doc": {"user": "Tom"}}

//Fetch multiple documents in a single request
GET /_mget
{
  "docs": [
    { "_index": "test", "_id": "1" },
    { "_index": "test", "_id": "2" }
  ]
}

//Multi search: each empty-or-index header line targets the index for the
//query line that follows it; the first {} uses the index from the URL.
//"size" is numeric, so pass it as an integer rather than a string.
POST kibana_sample_data_ecommerce/_msearch
{}
{"query" : {"match_all": {}}, "size": 1}
{"index": "kibana_sample_data_flights"}
{"query" : {"match_all": {}}, "size": 2}