ELK實戰篇--logstash日誌收集eslaticsearch和kibana
阿新 • • 發佈:2018-12-23
前篇:
ELK6.2.2日誌分析監控系統搭建和配置
ELK實戰篇
好,現在索引也可以建立了,現在可以來輸出nginx、apache、message、secrue的日誌到前臺展示(Nginx有的話直接修改,沒有自行安裝)
編輯nginx配置檔案,修改以下內容(在http模組下新增)
# JSON access-log format so logstash can ingest entries without grok parsing.
# Goes in the http{} block; each $variable is expanded per request.
# FIX: the "domian" key was a typo — corrected to "domain".
log_format json '{"@timestamp":"$time_iso8601",'
'"@version":"1",'
'"client":"$remote_addr",'
'"url":"$uri",'
'"status":"$status",'
'"domain":"$host",'
'"host":"$server_addr",'
'"size":"$body_bytes_sent",'
'"responsetime":"$request_time",'
'"referer":"$http_referer",'
'"ua":"$http_user_agent"'
'}';
修改access_log的輸出格式為剛才定義的json
access_log logs/elk.access.log json;
繼續修改apache的配置檔案
# JSON access-log format for logstash ingestion, registered as "ls_apache_json".
# FIX: removed stray leading spaces inside the "@timestamp" and "urlpath" key
# names, which would otherwise emit the malformed JSON keys " @timestamp" and
# " urlpath".
LogFormat "{ \
\"@timestamp\": \"%{%Y-%m-%dT%H:%M:%S%z}t\", \
\"@version\": \"1\", \
\"tags\":[\"apache\"], \
\"message\": \"%h %l %u %t \\\"%r\\\" %>s %b\", \
\"clientip\": \"%a\", \
\"duration\": %D, \
\"status\": %>s, \
\"request\": \"%U%q\", \
\"urlpath\": \"%U\", \
\"urlquery\": \"%q\", \
\"bytes\": %B, \
\"method\": \"%m\", \
\"site\": \"%{Host}i\", \
\"referer\": \"%{Referer}i\", \
\"useragent\": \"%{User-agent}i\" \
}" ls_apache_json
一樣修改輸出格式為上面定義的json格式
CustomLog logs/access_log ls_apache_json
編輯logstash配置檔案,進行日誌收集
vim /etc/logstash/conf.d/full.conf
# Shipper pipeline (full.conf): tail four local log files and route each
# event to its own daily Elasticsearch index, selected by the "type" field
# that is stamped on every event at input time.
input {
file {
path => "/var/log/messages"
type => "system"
# read existing file content on first run, not just newly appended lines
start_position => "beginning"
}
file {
path => "/var/log/secure"
type => "secure"
start_position => "beginning"
}
file {
path => "/var/log/httpd/access_log"
type => "http"
start_position => "beginning"
}
file {
path => "/usr/local/nginx/logs/elk.access.log"
type => "nginx"
start_position => "beginning"
}
}
# One ES output per type; index names carry a daily date suffix so old
# indices can be dropped wholesale.
output {
if [type] == "system" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-system-%{+YYYY.MM.dd}"
}
}
if [type] == "secure" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-secure-%{+YYYY.MM.dd}"
}
}
if [type] == "http" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-http-%{+YYYY.MM.dd}"
}
}
if [type] == "nginx" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-nginx-%{+YYYY.MM.dd}"
}
}
}
執行看看效果如何
logstash -f /etc/logstash/conf.d/full.conf
可以發現所有建立日誌的索引都已存在,接下來就去Kibana建立日誌索引,進行展示(按照上面的方法進行建立索引即可),看下展示的效果
接下來再來一發MySQL慢日誌的展示
由於MySQL的慢日誌查詢格式比較特殊,所以需要用正則進行匹配,並使用multiline能夠進行多行匹配(看具體配置)
# Extended pipeline: the four file inputs from before, plus the MySQL slow
# query log, which needs multiline assembly and grok parsing.
input {
file {
path => "/var/log/messages"
type => "system"
start_position => "beginning"
}
file {
path => "/var/log/secure"
type => "secure"
start_position => "beginning"
}
file {
path => "/var/log/httpd/access_log"
type => "http"
start_position => "beginning"
}
file {
path => "/usr/local/nginx/logs/elk.access.log"
type => "nginx"
start_position => "beginning"
}
file {
path => "/var/log/mysql/mysql.slow.log"
type => "mysql"
start_position => "beginning"
# A slow-log entry spans several lines and starts with "# User@Host:".
# negate + what => "previous" folds every non-header line into the
# preceding event, so one slow query becomes one event.
# FIX: the pattern had been mangled into "[email protected]" by e-mail
# obfuscation; restored the real MySQL slow-log header "User@Host:".
codec => multiline {
pattern => "^# User@Host:"
negate => true
what => "previous"
}
}
}
filter {
# Tag monitoring-probe noise ("SELECT SLEEP") so it can be dropped below;
# tag_on_failure => [] keeps non-matching events untagged instead of
# receiving _grokparsefailure.
grok {
match => { "message" => "SELECT SLEEP" }
add_tag => [ "sleep_drop" ]
tag_on_failure => []
}
if "sleep_drop" in [tags] {
drop {}
}
# Parse the assembled slow-log entry into typed fields (query time, lock
# time, rows sent/examined, database, the SQL text and its leading verb).
# FIX: same "User@Host" restoration as in the multiline pattern above.
grok {
match => { "message" => "(?m)^# User@Host: %{USER:User}\[[^\]]+\] @ (?:(?<clienthost>\S*) )?\[(?:%{IP:Client_IP})?\]\s.*# Query_time: %{NUMBER:Query_Time:float}\s+Lock_time: %{NUMBER:Lock_Time:float}\s+Rows_sent: %{NUMBER:Rows_Sent:int}\s+Rows_examined: %{NUMBER:Rows_Examined:int}\s*(?:use %{DATA:Database};\s*)?SET timestamp=%{NUMBER:timestamp};\s*(?<Query>(?<Action>\w+)\s+.*)\n# Time:.*$" }
}
# Use the "SET timestamp=..." value as the event time, then drop the field.
date {
match => [ "timestamp", "UNIX" ]
remove_field => [ "timestamp" ]
}
}
output {
if [type] == "system" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-system-%{+YYYY.MM.dd}"
}
}
if [type] == "secure" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-secure-%{+YYYY.MM.dd}"
}
}
if [type] == "http" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-http-%{+YYYY.MM.dd}"
}
}
if [type] == "nginx" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-nginx-%{+YYYY.MM.dd}"
}
}
if [type] == "mysql" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-mysql-slow-%{+YYYY.MM.dd}"
}
}
}
檢視效果(一條慢日誌查詢會顯示一條,如果不進行正則匹配,那麼一行就會顯示一條)
具體的日誌輸出需求,進行具體的分析
三:ELK終極篇
安裝redis
# yum install -y redis
修改redis的配置檔案
# vim /etc/redis.conf
修改內容如下
daemonize yes
bind 192.168.1.202
啟動redis服務
# /etc/init.d/redis restart
測試redis的是否啟用成功
# redis-cli -h 192.168.1.202
輸入info命令,如果有輸出且不報錯即表示連線成功
redis 192.168.1.202:6379> info
redis_version:2.4.10
....
編輯配置redis-out.conf配置檔案,把標準輸入的資料儲存到redis中
# vim /etc/logstash/conf.d/redis-out.conf
新增如下內容
# Smoke-test pipeline: whatever is typed on stdin is pushed onto a redis
# list, proving the logstash -> redis leg works before wiring up real logs.
input {
stdin {}
}
output {
redis {
host => "192.168.1.202"
port => "6379"
password => 'test'
db => '1'
# store events as a redis list under the key below (LPUSH semantics)
data_type => "list"
key => 'elk-test'
}
}
執行logstash指定redis-out.conf的配置檔案
# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis-out.conf
執行成功以後,在logstash中輸入內容(檢視下效果)
編輯配置redis-in.conf配置檔案,把reids的儲存的資料輸出到elasticsearch中
# vim /etc/logstash/conf.d/redis-in.conf
新增如下內容
# Counterpart test pipeline: pop the events written by redis-out.conf from
# the redis list and index them into Elasticsearch.
input{
redis {
host => "192.168.1.202"
port => "6379"
password => 'test'
db => '1'
data_type => "list"
key => 'elk-test'
batch_count => 1 # items fetched per read; default is 125 (with fewer than 125 entries in redis it errors, so set 1 while testing)
}
}
output {
elasticsearch {
hosts => ['192.168.1.202:9200']
index => 'redis-test-%{+YYYY.MM.dd}'
}
}
執行logstash指定redis-in.conf的配置檔案
# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis-in.conf
把之前的配置檔案修改一下,變成所有的日誌監控的來源檔案都存放到redis中,然後通過redis再輸出到elasticsearch中
更改為如下,編輯full.conf
# Shipper role (full.conf, revised): instead of writing straight to
# Elasticsearch, each log type is buffered into its own redis list; a
# separate indexer pipeline drains redis into ES (loose coupling).
input {
file {
path => "/var/log/httpd/access_log"
type => "http"
start_position => "beginning"
}
file {
path => "/usr/local/nginx/logs/elk.access.log"
type => "nginx"
start_position => "beginning"
}
file {
path => "/var/log/secure"
type => "secure"
start_position => "beginning"
}
file {
path => "/var/log/messages"
type => "system"
start_position => "beginning"
}
}
# One redis list per type, all in db 6 on the same redis instance.
output {
if [type] == "http" {
redis {
host => "192.168.1.202"
password => 'test'
port => "6379"
db => "6"
data_type => "list"
key => 'nagios_http'
}
}
if [type] == "nginx" {
redis {
host => "192.168.1.202"
password => 'test'
port => "6379"
db => "6"
data_type => "list"
key => 'nagios_nginx'
}
}
if [type] == "secure" {
redis {
host => "192.168.1.202"
password => 'test'
port => "6379"
db => "6"
data_type => "list"
key => 'nagios_secure'
}
}
if [type] == "system" {
redis {
host => "192.168.1.202"
password => 'test'
port => "6379"
db => "6"
data_type => "list"
key => 'nagios_system'
}
}
}
執行logstash指定full.conf的配置檔案
# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/full.conf
在redis中檢視是否已經將資料寫到裡面(有時候輸入的日誌檔案不產生日誌,會導致redis裡面也沒有寫入日誌)
把redis中的資料讀取出來,寫入到elasticsearch中(需要另外一臺主機做實驗)
編輯配置檔案
# vim /etc/logstash/conf.d/redis-out.conf
新增如下內容
# Indexer role: drain the four per-type redis lists filled by the shipper
# and write each to its daily Elasticsearch index. The "type" field is set
# here on the redis input, mirroring the shipper's routing.
input {
redis {
type => "system"
host => "192.168.1.202"
password => 'test'
port => "6379"
db => "6"
data_type => "list"
key => 'nagios_system'
# fetch one item per read; the default of 125 errors when the list
# holds fewer entries, so keep 1 while testing
batch_count => 1
}
redis {
type => "http"
host => "192.168.1.202"
password => 'test'
port => "6379"
db => "6"
data_type => "list"
key => 'nagios_http'
batch_count => 1
}
redis {
type => "nginx"
host => "192.168.1.202"
password => 'test'
port => "6379"
db => "6"
data_type => "list"
key => 'nagios_nginx'
batch_count => 1
}
redis {
type => "secure"
host => "192.168.1.202"
password => 'test'
port => "6379"
db => "6"
data_type => "list"
key => 'nagios_secure'
batch_count => 1
}
}
output {
if [type] == "system" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-system-%{+YYYY.MM.dd}"
}
}
if [type] == "http" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-http-%{+YYYY.MM.dd}"
}
}
if [type] == "nginx" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-nginx-%{+YYYY.MM.dd}"
}
}
if [type] == "secure" {
elasticsearch {
hosts => ["192.168.1.202:9200"]
index => "nagios-secure-%{+YYYY.MM.dd}"
}
}
}
注意:
input是從客戶端收集的
output是同樣也儲存到192.168.1.202中的elasticsearch中,如果要儲存到當前的主機上,可以把output中的hosts修改成localhost,如果還需要在kibana中顯示,需要在本機上部署kabana,為何要這樣做,起到一個鬆耦合的目的
說白了,就是在客戶端收集日誌,寫到服務端的redis裡或是本地的redis裡面,輸出的時候對接ES伺服器即可
執行命令看看效果
# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis-out.conf
效果是和直接往ES伺服器輸出一樣的(這樣是先將日誌存到redis資料庫,然後再從redis資料庫裡取出日誌)
上線ELK
1. 日誌分類
系統日誌 rsyslog logstash syslog外掛
訪問日誌 nginx logstash codec json
錯誤日誌 file logstash mulitline
執行日誌 file logstash codec json
裝置日誌 syslog logstash syslog外掛
Debug日誌 file logstash json 或者 mulitline
2. 日誌標準化
路徑 固定
格式 儘量json
3. 從系統日誌開始-->錯誤日誌-->執行日誌-->訪問日誌
因為ES儲存日誌是永久儲存,所以需要定期刪除一下日誌,下面命令為刪除指定時間前的日誌
curl -X DELETE http://xx.xx.com:9200/logstash-*-`date +%Y-%m-%d -d "-$n days"`
版權宣告:本文內容為轉載,版權歸作者所有。