logstash推送mysql慢查询日志
# cat /usr/local/logstash/etc/logstash.conf
input {
  file {
    type => "mysql-slow"
    path => "/mydata/slow-query.log"
    # Merge continuation lines: any line that does NOT start with
    # "# User@Host:" is appended to the previous event, so one slow-query
    # entry (header lines + SQL statement) arrives as a single event.
    codec => multiline {
      pattern => "^# User@Host:"
      negate => true
      what => "previous"
    }
  }
}
#input节的配置定义了输入的日志类型为mysql慢查询日志类型以及日志路径,采用合并多行数据。negate字段是一个选择开关,可以正向匹配和反向匹配
filter {
  # Tag "SELECT SLEEP" noise events so they can be dropped below.
  grok {
    match => { "message" => "SELECT SLEEP" }
    add_tag => [ "sleep_drop" ]
    tag_on_failure => [] # prevent default _grokparsefailure tag on real records
  }
  # BUG FIX: the membership test must name the [tags] field; the original
  # `if "sleep_drop" in {` is a configuration syntax error.
  if "sleep_drop" in [tags] {
    drop {}
  }
  # Parse one multiline slow-query entry into structured fields
  # (user, client host/IP, timings, row counts, database, SQL text).
  # BUG FIX: the named captures had their angle-bracketed names stripped
  # during extraction — restored as (?<host>...), (?<query>...), (?<action>...).
  grok {
    match => [ "message", "(?m)^# User@Host: %{USER:user}\[[^\]]+\] @ (?:(?<host>\S*) )?\[(?:%{IP:clientip})?\]\s*# Query_time: %{NUMBER:query_time:float}\s+Lock_time: %{NUMBER:lock_time:float}\s+Rows_sent: %{NUMBER:rows_sent:int}\s+Rows_examined: %{NUMBER:rows_examined:int}\s*(?:use %{DATA:database};\s*)?SET timestamp=%{NUMBER:timestamp};\s*(?<query>(?<action>\w+)\s+.*)\n# Time:.*$" ]
  }
  # Use the slow log's own "SET timestamp=<epoch seconds>" value as the
  # event @timestamp, then discard the raw field.
  date {
    match => [ "timestamp", "UNIX" ]
    remove_field => [ "timestamp" ]
  }
}
#grok节定义了慢查询日志输出的正则切割,这个容易头晕眼花!
output {
  # Echo every parsed event to the console for debugging.
  stdout {
    codec => rubydebug
  }
  # Ship events to Elasticsearch under a per-day, per-server index name.
  elasticsearch {
    hosts => ["192.168.1.226:9200"]
    index => "mysql-server81-%{+YYYY.MM.dd}"
  }
}
#output节定义了输出,这里除了打印到屏幕上之外,还输入到elasticsearch,同时起一个自定义的索引名称
页:
[1]