设为首页 收藏本站
查看: 964|回复: 0

[经验分享] Elk实时日志分析平台5.0版本源码安装配置

[复制链接]

尚未签到

发表于 2019-1-28 13:17:44 | 显示全部楼层 |阅读模式
  目录
  一、 安装JAVA. 1
  二、 安装elasticsearch. 2
  三、 配置elasticsearch. 2
  四、 安装logstash. 3
  五、 配置 logstash. 3
  六、 安装kibana. 4
  七、 配置kibana. 5
  八、 安装x-pack插件... 5
  九、 x-pack管理用户... 6
  1、 添加用户... 6
  2、 查看用户... 6
  3、 测试用户登录... 6
  4、 删除用户... 6
  十、 安装filebeat. 7
一、 安装JAVA
# mkdir -p /usr/local/java/
# cd /usr/local/java/
# tar zxvf /data/elk5.0/jdk-8u111-linux-x64.tar.gz
# NOTE(review): this listing was garbled during HTML extraction. The
# `cat >>/etc/profile` step (presumably a here-doc exporting JAVA_HOME/PATH)
# and the entire head of the elasticsearch init script (shebang, variable
# definitions, the rh_status/rh_status_q/start/stop helper functions) are
# missing; only the tail `case` dispatcher survives, with statements fused
# onto single lines (e.g. `exit 0$1;;` was apparently `exit 0`, then `$1` to
# invoke the function named by the argument, then `;;`).
# Recover the full script before use -- do not run this fragment as-is.
# cat >>/etc/profile&1}case "$1" instart)
        rh_status_q && exit 0$1;;
    stop)
        rh_status_q || exit 0$1;;
    restart)
        $1;;
    reload)
        rh_status_q || exit 7$1;;
    force-reload)
        force_reload
        ;;
    status)
        rh_status
        ;;
    condrestart|try-restart)
        rh_status_q || exit 0restart
        ;;*)echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"exit 2esacexit $?# chmod +x /etc/init.d/elasticsearch
# /etc/init.d/elasticsearch start
# /etc/init.d/elasticsearch status
elasticsearch (pid 20895) is running...
# netstat -ntlp |grep 9[2-3]00
tcp 0 0 :::9300 :::* LISTEN 20895/java
tcp 0 0 :::9200 :::* LISTEN 20895/java
三、 配置elasticsearch
  内存低于2G,需要修改jvm配置
  -Xms512m
  -Xmx512m
# cat /data/PRG/elasticsearch/config/elasticsearch.yml |grep -v '#'
network.host: 0.0.0.0    # listen on all interfaces
action.auto_create_index: .security,.monitoring*,.watches,.triggered_watches,.watcher-history*
# The x-pack modules below are optional; enable them as needed.
xpack.security.enabled: true    # enable user authentication
xpack.monitoring.enabled: true
xpack.graph.enabled: true
xpack.watcher.enabled: true
# Authentication realm types include ldap, file, pki, Active Directory, etc.
xpack.security.authc.realms:
  file1:
    type: file
    order: 0
四、 安装logstash
# cd /data/PRG/
# tar zxvf /data/elk5.0/logstash-5.0.2.tar.gz
# mv logstash-5.0.2 logstash
# useradd logstash -s /sbin/nologin
# chown logstash.logstash /data/PRG/logstash
  添加启动脚本
  vim /etc/init.d/logstash
# NOTE(review): the pleaserun-generated logstash init script below was mangled
# by HTML extraction -- many statements are fused onto single lines where
# newlines were lost. Re-extract or reformat before installing it as
# /etc/init.d/logstash; it will not parse in this form.
#!/bin/sh# Init script for logstash
# Maintained by Elasticsearch
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
#   * Sections: 20.2, 20.3#
### BEGIN INIT INFO
# Provides:          logstash
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5# Default-Stop:      0 1 6# Short-Description:
# Description:        Starts Logstash as a daemon.
### END INIT INFO
PATH=/sbin:/usr/sbin:/bin:/usr/bin
# Root check, then daemon identity and defaults; /etc/default/logstash and
# /etc/sysconfig/logstash (sourced below) may override any of these.
export PATHif [ `id -u` -ne 0 ]; then
   echo "You need root privileges to run this script"
   exit 1finame=logstash
pidfile="/var/run/$name.pid"LS_USER=logstash
LS_GROUP=logstash
LS_HOME=/var/lib/logstash
LS_HEAP_SIZE="1g"LS_LOG_DIR=/var/log/logstash
LS_LOG_FILE="${LS_LOG_DIR}/$name.log"LS_CONF_DIR=/etc/logstash/conf.d
LS_OPEN_FILES=16384LS_NICE=19KILL_ON_STOP_TIMEOUT=${KILL_ON_STOP_TIMEOUT-0} #default value is zero to this variable but could be updated by user request
LS_OPTS=""[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name
# NOTE(review): $program points at /opt/logstash and the config dir at
# /etc/logstash/conf.d, while this tutorial installs to /data/PRG/logstash
# and uses /data/PRG/logstash/conf.d -- adjust these paths or the script
# cannot find the binary/config.
program=/opt/logstash/bin/logstash
# quiet: run a command discarding stdout/stderr, returning only its status.
args="agent -f ${LS_CONF_DIR} -l ${LS_LOG_FILE} ${LS_OPTS}"quiet() {  "$@" > /dev/null 2>&1
  return $?}
# start: raise the open-files ulimit, drop privileges via chroot --userspec,
# launch logstash in the background, and write the pidfile from the parent.
start() {
  LS_JAVA_OPTS="${LS_JAVA_OPTS} -Djava.io.tmpdir=${LS_HOME}"
  HOME=${LS_HOME}
  export PATH HOME LS_HEAP_SIZE LS_JAVA_OPTS LS_USE_GC_LOGGING LS_GC_LOG_FILE
  # chown doesn't grab the suplimental groups when setting the user:group - so we have to do it for it.
  # Boy, I hope we're root here.
  SGROUPS=$(id -Gn "$LS_USER" | tr " " "," | sed 's/,$//'; echo '')  if [ ! -z $SGROUPS ]  thenEXTRA_GROUPS="--groups $SGROUPS"
  fi
  # set ulimit as (root, presumably) first, before we drop privileges
  ulimit -n ${LS_OPEN_FILES}
  # Run the program!  nice -n ${LS_NICE} chroot --userspec $LS_USER:$LS_GROUP $EXTRA_GROUPS / sh -c "    cd $LS_HOME
    ulimit -n ${LS_OPEN_FILES}
    exec \"$program\" $args
  " > "${LS_LOG_DIR}/$name.stdout" 2> "${LS_LOG_DIR}/$name.err" &
  # Generate the pidfile from here. If we instead made the forked process
  # generate it there will be a race condition between the pidfile writing
  # and a process possibly asking for status.  echo $! > $pidfile  echo "$name started."
  return 0}
# stop: send SIGTERM, poll up to ~9s, then SIGKILL only when
# KILL_ON_STOP_TIMEOUT=1; otherwise report failure and return 1.
stop() {
  # Try a few times to kill TERM the program  if status ; thenpid=`cat "$pidfile"`echo "Killing $name (pid $pid) with SIGTERM"kill -TERM $pid
    # Wait for it to exit.for i in 1 2 3 4 5 6 7 8 9 ; do  echo "Waiting $name (pid $pid) to die..."  status || break      sleep 1doneif status ; then  if [ $KILL_ON_STOP_TIMEOUT -eq 1 ] ; thenecho "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss."kill -KILL $pidecho "$name killed with SIGKILL."  elseecho "$name stop failed; still running."return 1 # stop timed out and not forced      fielse  echo "$name stopped."fi
  fi}
# status: 0 = running, 2 = dead but pidfile exists, 3 = not running.
status() {  if [ -f "$pidfile" ] ; thenpid=`cat "$pidfile"`if kill -0 $pid > /dev/null 2> /dev/null ; then  # process by this pid is running.
      # It may not be our pid, but that's what you get with just pidfiles.  # TODO(sissel): Check if this process seems to be the same as the one we
      # expect. It'd be nice to use flock here, but flock uses fork, not exec,  # so it makes it quite awkward to use in this case.
      return 0else  return 2 # program is dead but pid file existsfi
  elsereturn 3 # program is not running  fi}
# reload: SIGHUP the daemon if it is running.
reload() {  if status ; thenkill -HUP `cat "$pidfile"`  fi}
# force_stop: normal stop, then SIGKILL if the process is still alive.
force_stop() {  if status ; thenstop
    status && kill -KILL `cat "$pidfile"`  fi}
# configtest: 1 if LS_CONF_DIR is empty, 0 if the config parses, 6 otherwise.
configtest() {
  # Check if a config file exists  if [ ! "$(ls -A ${LS_CONF_DIR}/* 2> /dev/null)" ]; thenecho "There aren't any configuration files in ${LS_CONF_DIR}"return 1
  fi
  HOME=${LS_HOME}
  export PATH HOME
  test_args="--configtest -f ${LS_CONF_DIR} ${LS_OPTS}"
  $program ${test_args}
  [ $? -eq 0 ] && return 0
  # Program not configured
  return 6}case "$1" in
  start)
    status
    code=$?if [ $code -eq 0 ]; then  echo "$name is already running"else  start
      code=$?fiexit $code
    ;;
  stop) stop ;;
  force-stop) force_stop ;;
  status)
    status
    code=$?if [ $code -eq 0 ] ; then  echo "$name is running"else  echo "$name is not running"fiexit $code
    ;;
  reload) reload ;;
  restart)
    quiet configtest
    RET=$?if [ ${RET} -ne 0 ]; then  echo "Configuration error. Not restarting. Re-run with configtest parameter for details"  exit ${RET}fistop && start
    ;;
  configtest)
    configtest
    exit $?;;  *)echo "Usage: $SCRIPTNAME {start|stop|force-stop|status|reload|restart|configtest}" >&2exit 3
  ;;esacexit $?# chmod +x /etc/init.d/logstash
# /etc/init.d/logstash start
# /etc/init.d/logstash status
logstash is running
# netstat -ntlp|grep 9600
tcp 0 0 :::9600 :::* LISTEN 10141/java
五、 配置 logstash
# cat /data/PRG/logstash/config/logstash.yml |grep -v '#'
http.host: "0.0.0.0" ###开启监听地址
  nginx日志收集

# cat /data/PRG/logstash/conf.d/filter.conf
# Logstash pipeline: receive nginx access-log events from filebeat (beats
# input on port 10200), parse them with grok, and index into elasticsearch.
input {
beats {
port => 10200}
}
# Two grok patterns are tried in order: the first matches lines whose
# trailing columns are literal "- , - , -"; the second matches lines with
# those columns populated (host:port plus numeric fields).
filter {
grok {
match => {
message => "%{IPORHOST:remote_addr} , \[%{HTTPDATE:timestamp}\] , %{IPORHOST:http_host} , \"%{WORD:http_verb} (?:%{PATH:baseurl}\?%{NOTSPACE:params}|%{DATA:raw_http_request})\" , %{NUMBER:http_status_code} , %{NUMBER:bytes_read} , %{QS:referrer} , %{QS:agent} , \"%{IPORHOST:client_ip}, %{IPORHOST:proxy_server}\" , - , - , - , %{IPORHOST:server_ip} , %{BASE10NUM:request_duration}" }
match => {
message => "%{IPORHOST:remote_addr} , \[%{HTTPDATE:timestamp}\] , %{IPORHOST:http_host} , \"%{WORD:http_verb} (?:%{PATH:baseurl}\?%{NOTSPACE:params}|%{DATA:raw_http_request})\" , %{NUMBER:http_status_code} , %{NUMBER:bytes_read} , %{QUOTEDSTRING:referrer} , %{QS:agent} , \"%{IPORHOST:client_ip}, %{IPORHOST:proxy_server}\" , %{IPORHOST}:%{INT} , %{INT} , %{BASE10NUM} , %{IPORHOST} , %{BASE10NUM:request_duration}" }
}
}
# Output: daily index operation-YYYY.MM.dd; the stdout/rubydebug output is
# for debugging and can be removed in production.
output {
elasticsearch {
hosts => ["192.168.62.200:9200"]
index => "operation-%{+YYYY.MM.dd}"document_type => "nginx2"user => 'admin' #### elasticsearch username, created with the x-pack users tool
password => 'kbsonlong' #### elasticsearch password (the source comment mistakenly said "username")
}
stdout { codec => rubydebug }
}

六、 安装kibana
# cd /data/PRG/
# tar zxvf /data/elk5.0/kibana-5.0.2-linux-x86_64.tar.gz
# mv kibana-5.0.2-linux-x86_64 kibana
# useradd kibana -s /sbin/nologin
# chown kibana.kibana /data/PRG/kibana
  添加启动脚本
  # vim /etc/init.d/kibana
# NOTE(review): this pleaserun-style kibana init script was mangled by HTML
# extraction -- newlines were lost and statements fused. Reformat before
# installing it as /etc/init.d/kibana; it will not parse in this form.
#!/bin/sh# Init script for kibana
# Maintained by
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
#   * Sections: 20.2, 20.3#
### BEGIN INIT INFO
# Provides:          kibana
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5# Default-Stop:      0 1 6# Short-Description:
# Description:       Kibana
### END INIT INFO
PATH=/sbin:/usr/sbin:/bin:/usr/bin
export PATH
KIBANA_HOME=/data/PRG/kibana
name=kibana
program=$KIBANA_HOME/bin/kibana
# pidfile and logs live under $KIBANA_HOME/logs; /etc/default/kibana and
# /etc/sysconfig/kibana (sourced below) may override these.
args=''pidfile="$KIBANA_HOME/logs/$name.pid"LOG_HOME="$KIBANA_HOME/logs"[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name
# trace logs to syslog via logger; emit logs to syslog AND echoes to stdout.
[ -z "$nice" ] && nice=0trace() {
  logger -t "/etc/init.d/kibana" "$@"}
emit() {
  trace "$@"
  echo "$@"}
# start: launch kibana in the background (note: the chroot --userspec
# privilege drop is commented out, so kibana runs as the invoking user) and
# write the pidfile from the parent to avoid a race with status queries.
start() {
  # Ensure the log directory is setup correctly.
  [ ! -d "$LOG_HOME" ] && mkdir "$LOG_HOME"
  chmod 755 "$LOG_HOME"

  # Setup any environmental stuff beforehand

  # Run the program!
  #chroot --userspec "$user":"$group" "$chroot" sh -c "   
  $program $args >> $LOG_HOME/kibana.stdout 2>> $LOG_HOME/kibana.stderr &
  # Generate the pidfile from here. If we instead made the forked process
  # generate it there will be a race condition between the pidfile writing
  # and a process possibly asking for status.  echo $! > $pidfile
  emit "$name started"
  return 0}
# stop: NOTE(review): despite the "SIGTERM" message this greps `ps -ef` for
# the pid and pipes the matches to `xargs kill -9` -- an immediate SIGKILL
# that can also hit unrelated processes whose ps line merely contains the pid
# digits. Prefer a plain `kill -TERM $pid` here.
stop() {
  # Try a few times to kill TERM the program  if status ; thenpid=$(cat "$pidfile")echo "Killing $name (pid $pid) with SIGTERM"ps -ef |grep $pid |grep -v 'grep' |awk '{print $2}' | xargs kill -9# Wait for it to exit.for i in 1 2 3 4 5 ; do  trace "Waiting $name (pid $pid) to die..."  status || break      sleep 1doneif status ; then  if [ "$KILL_ON_STOP_TIMEOUT" -eq 1 ] ; thentrace "Timeout reached. Killing $name (pid $pid) with SIGKILL.  This may result in data loss."kill -KILL $pid
        emit "$name killed with SIGKILL."  elseemit "$name stop failed; still running."  fielse  emit "$name stopped."fi
  fi}
# status: 0 = running, 2 = dead but pidfile exists, 3 = not running.
status() {  if [ -f "$pidfile" ] ; thenpid=$(cat "$pidfile")if ps -p $pid > /dev/null 2> /dev/null ; then  # process by this pid is running.
      # It may not be our pid, but that's what you get with just pidfiles.  # TODO(sissel): Check if this process seems to be the same as the one we
      # expect. It'd be nice to use flock here, but flock uses fork, not exec,  # so it makes it quite awkward to use in this case.
      return 0else  return 2 # program is dead but pid file existsfi
  elsereturn 3 # program is not running  fi}case "$1" in
  force-start|start|stop|status|restart)
    trace "Attempting '$1' on kibana";;esaccase "$1" in
  force-start)
    PRESTART=no
    exec "$0" start
    ;;
  start)
    status
    code=$?if [ $code -eq 0 ]; then  emit "$name is already running"  exit $codeelse  start
      exit $?fi;;
  stop) stop ;;
  status)
    status
    code=$?if [ $code -eq 0 ] ; then  emit "$name is running"else  emit "$name is not running"fiexit $code
    ;;
  restart)
    stop && start
    ;;  *)echo "Usage: $SCRIPTNAME {start|force-start|stop|force-start|force-stop|status|restart}" >&2exit 3
  ;;esacexit $?# chmod +x /etc/init.d/kibana
# /etc/init.d/kibana start
# /etc/init.d/kibana status
# netstat -ntlp |grep 5601
tcp 0 0 0.0.0.0:5601 0.0.0.0:* LISTEN 13052/node
七、 配置kibana
# cat /data/PRG/kibana/config/kibana.yml |grep -v '#'
server.host: "0.0.0.0"
####以下模块视情况是否开启
xpack.security.enabled: true
xpack.monitoring.enabled: true
xpack.graph.enabled: true
xpack.reporting.enabled: true
八、 安装x-pack插件
# /data/PRG/kibana/bin/kibana-plugin install file:///root/x-pack-5.0.0.zip# /data/PRG/elasticsearch/bin/elasticsearch-plugin install file:///root/x-pack-5.0.0.zip  离线安装x-pack要修改用户脚本,默认创建用户配置文件在/etc/elasticsearch/x-pack目录
  在创建用户的时候提示/etc/elasticsearch/x-pack/users…tmp不存在,直接创建目录或者修改/data/PRG/elasticsearch/bin/x-pack/users脚本
# mkdir /etc/elasticsearch/x-pack/
# chown elasticsearch.elasticsearch /etc/elasticsearch/x-pack/ -R
九、 x-pack管理用户
1、 添加用户
# cd /data/PRG/elasticsearch
# bin/x-pack/users useradd admin -p kbsonlong -r superuser
2、 查看用户
# /data/PRG/elasticsearch/bin/x-pack/users list
admin : superuser  test : - ###创建用户时没有添加-r参数,所以没有用户角色
3、 测试用户登录
# curl http://localhost:9200/_xpack/ --user admin:kbsonlong{"build":{"hash":"7763f8e","date":"2016-10-26T04:51:59.202Z"},"license":{"uid":"06a82587-66ac-4d4a-90c4-857d9ca7f3bc","type":"trial","mode":"trial","status":"active","expiry_date_in_millis":1483753731066},"features":{"graph":{"description":"Graph Data Exploration for the Elastic Stack","available":true,"enabled":true},"monitoring":{"description":"Monitoring for the Elastic Stack","available":true,"enabled":true},"security":{"description":"Security for the Elastic Stack","available":true,"enabled":true},"watcher":{"description":"Alerting, Notification and Automation for the Elastic Stack","available":true,"enabled":true}},"tagline":"You know, for X"}4、 删除用户
# /data/PRG/elasticsearch/bin/x-pack/users userdel test
# /data/PRG/elasticsearch/bin/x-pack/users list
admin : superuser
十、 安装filebeat
# cd /data/PRG
# tar zxvf /data/elk5.0/filebeat-5.0.0-linux-x86_64.tar.gz
# mv filebeat-5.0.0-linux-x86_64 filebeat  配置启动脚本
  # vim /etc/init.d/filebeat
#!/bin/bash
#
# filebeat          filebeat shipper
#
# chkconfig: 2345 98 02#
### BEGIN INIT INFO
# Provides:          filebeat
# Required-Start:    $local_fs $network $syslog
# Required-Stop:     $local_fs $network $syslog
# Default-Start:     2 3 4 5# Default-Stop:      0 1 6# Short-Description: Sends log files to Logstash or directly to Elasticsearch.
# Description:       filebeat is a shipper part of the Elastic Beats
#                     family. Please see: https://www.elastic.co/products/beats### END INIT INFO

# NOTE(review): newlines were lost during HTML extraction, fusing several
# statements per line below; reformat before installing this as an init
# script -- it will not parse in this form.
# Daemon paths default to /data/PRG/filebeat; /etc/sysconfig/filebeat
# (sourced below) may override PIDFILE and PB_AGENT. The process is
# supervised through the filebeat-god wrapper.
PATH=/usr/bin:/sbin:/bin:/usr/sbin
export PATH
[ -f /etc/sysconfig/filebeat ] && . /etc/sysconfig/filebeat
pidfile=${PIDFILE-/data/PRG/filebeat/filebeat.pid}
agent=${PB_AGENT-/data/PRG/filebeat/filebeat}
args="-c /data/PRG/filebeat/filebeat.yml"test_args="-e -configtest"wrapper="filebeat-god"wrapperopts="-r / -n -p $pidfile"RETVAL=0# Source function library.
. /etc/rc.d/init.d/functions
# Determine if we can use the -p option to daemon, killproc, and status.
# RHEL < 5 can't.if status | grep -q -- '-p' 2>/dev/null; thendaemonopts="--pidfile $pidfile"pidopts="-p $pidfile"fitest() {
    $agent $args $test_args
}
# NOTE(review): the function above is named `test`, which shadows the shell
# builtin `test`; consider renaming it (e.g. configtest).
# start: run the config test first, then daemonize via filebeat-god.
start() {echo -n $"Starting filebeat: "testif [ $? -ne 0 ]; thenechoexit 1fidaemon $daemonopts $wrapper $wrapperopts -- $agent $args
    RETVAL=$?echoreturn $RETVAL
}
# stop: killproc the wrapper and remove the pidfile on success.
stop() {echo -n $"Stopping filebeat: "killproc $pidopts $wrapper
    RETVAL=$?echo[ $RETVAL = 0 ] && rm -f ${pidfile}
}
# restart: refuse to restart when the config test fails.
restart() {
    testif [ $? -ne 0 ]; thenreturn 1fistop
    start
}
# rh_status / rh_status_q: RHEL-style status helpers; the _q variant
# discards output and yields only the exit code.
rh_status() {
    status $pidopts $wrapper
    RETVAL=$?return $RETVAL
}
rh_status_q() {
    rh_status >/dev/null 2>&1}case "$1" instart)
        start
    ;;
    stop)
        stop
    ;;
    restart)
        restart
    ;;
    condrestart|try-restart)
        rh_status_q || exit 0restart
    ;;
    status)
        rh_status
    ;;*)echo $"Usage: $0 {start|stop|status|restart|condrestart}"exit 1esacexit $RETVAL  # cat filebeat/filebeat.yml |grep -v '#'
filebeat.prospectors:
# Tail the nginx access log and ship each line as an event.
- input_type: log
  paths:
    - /tmp/nginx.log
# Send events to the logstash beats input listening on port 10200.
output.logstash:
  enabled: true
  hosts: ["localhost:10200"]

  启动filebeat
# /etc/init.d/filebeat start
Starting filebeat: 2016/12/08 07:18:37.177631 beat.go:264: INFO Home path: [/data/PRG/filebeat] Config path: [/data/PRG/filebeat] Data path: [/data/PRG/filebeat/data] Logs path: [/data/PRG/filebeat/logs]2016/12/08 07:18:37.177681 beat.go:174: INFO Setup Beat: filebeat; Version: 5.0.02016/12/08 07:18:37.177760 logstash.go:90: INFO Max Retries set to: 32016/12/08 07:18:37.177828 outputs.go:106: INFO Activated logstash as output plugin.2016/12/08 07:18:37.177912 publish.go:291: INFO Publisher name: operation2016/12/08 07:18:37.178158 async.go:63: INFO Flush Interval set to: 1s2016/12/08 07:18:37.178170 async.go:64: INFO Max Bulk Size set to: 2048Config OK
[ OK ]
# /etc/init.d/filebeat status
filebeat-god (pid 7365) is running...
# ps -ef |grep filebeat
root 7405 1 0 15:18 pts/1 00:00:00 filebeat-god -r / -n -p /data/PRG/filebeat/filebeat.pid -- /data/PRG/filebeat/filebeat -c /data/PRG/filebeat/filebeat.yml
root 7406 7405 0 15:18 pts/1 00:00:00 /data/PRG/filebeat/filebeat -c /data/PRG/filebeat/filebeat.yml
# netstat -ntlp | egrep '9200|9300|5601|9600|10200'tcp 0 0 0.0.0.0:5601 0.0.0.0:* LISTEN 14339/node
tcp 0 0 :::9300 :::* LISTEN 14205/java
tcp 0 0 :::10200 :::* LISTEN 14309/java
tcp 0 0 ::ffff:127.0.0.1:9600 :::* LISTEN 14309/java
tcp 0 0 :::9200 :::* LISTEN 14205/java



运维网声明 1、欢迎大家加入本站运维交流群:群②:261659950 群⑤:202807635 群⑦870801961 群⑧679858003
2、本站所有主题由该帖子作者发表,该帖子作者与运维网享有帖子相关版权
3、所有作品的著作权均归原作者享有,请您和我们一样尊重他人的著作权等合法权益。如果您对作品感到满意,请购买正版
4、禁止制作、复制、发布和传播具有反动、淫秽、色情、暴力、凶杀等内容的信息,一经发现立即删除。若您因此触犯法律,一切后果自负,我们对此不承担任何责任
5、所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其内容的准确性、可靠性、正当性、安全性、合法性等负责,亦不承担任何法律责任
6、所有作品仅供您个人学习、研究或欣赏,不得用于商业或者其他用途,否则,一切后果均由您自己承担,我们对此不承担任何法律责任
7、如涉及侵犯版权等问题,请您及时通知我们,我们将立即采取措施予以解决
8、联系人Email:admin@iyunv.com 网址:www.yunweiku.com

所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其承担任何法律责任,如涉及侵犯版权等问题,请您及时通知我们,我们将立即处理,联系人Email:kefu@iyunv.com,QQ:1061981298 本贴地址:https://www.iyunv.com/thread-668748-1-1.html 上篇帖子: 统一日志ELK部署配置(5)——kibana 下篇帖子: ELK 完整部署和使用
您需要登录后才可以回帖 登录 | 立即注册

本版积分规则

扫码加入运维网微信交流群X

扫码加入运维网微信交流群

扫描二维码加入运维网微信交流群,最新一手资源尽在官方微信交流群!快快加入我们吧...

扫描微信二维码查看详情

客服E-mail:kefu@iyunv.com 客服QQ:1061981298


QQ群⑦:运维网交流群⑦ QQ群⑧:运维网交流群⑧ k8s群:运维网kubernetes交流群


提醒:禁止发布任何违反国家法律、法规的言论与图片等内容;本站内容均来自个人观点与网络等信息,非本站认同之观点.


本站大部分资源是网友从网上搜集分享而来,其版权均归原作者及其网站所有,我们尊重他人的合法权益,如有内容侵犯您的合法权益,请及时与我们联系进行核实删除!



合作伙伴: 青云cloud

快速回复 返回顶部 返回列表