Collecting nginx logs into Elasticsearch with Filebeat and an ingest pipeline

1. First, define the nginx log format

log_format  main  '$time_iso8601 $remote_addr - $remote_user "$request" '
             '$status $body_bytes_sent $http_referer '
             '$http_x_forwarded_for $upstream_addr $upstream_response_time $request_time "$http_cookie" '
             '"$http_user_agent"';

2. Create the custom ingest pipeline

PUT /_ingest/pipeline/nginxaccess
{
    "description": "nginx access log pipeline",
    "processors": [{
        "grok": {
            "field": "message",
            "patterns": ["%{TIMESTAMP_ISO8601:timestamp} (%{IPORHOST:client_ip}|-) (%{USER:ident}|-) (%{USER:auth}|-) \"(?:%{WORD:verb} %{NOTSPACE:request} (?:HTTP/%{NUMBER:http_version})?|-)\" (?:%{NUMBER:status}|-) (?:%{NUMBER:bytes}|-) %{NOTSPACE:request_body} (%{IPORHOST:forwardedFor}|-) (%{URIHOST:upstream_host}|-) (%{BASE16FLOAT:upstream_response_time}|-) (%{BASE16FLOAT:request_time}) \"(%{DATA:http_cookei}|-)\" \"%{DATA:user_agent}\""]
            }
        },
        {
            "date": {
                "field": "timestamp",
                "target_field": "@timestamp",
                "formats": [
                    "yyyy-MM-dd HH:mm:ss.SSS",
                    "ISO8601"
                ],
                "timezone": "Asia/Shanghai",
                "ignore_failure": true
            }
        },
        {
            "date_index_name": {
                "field": "@timestamp",
                "index_name_prefix": "nginx-",
                "index_name_format": "yyyy.MM.dd",
                "date_rounding": "d",
                "timezone": "Asia/Shanghai",
                "ignore_failure": true
            }
        },
        {
            "geoip":{
                "field": "client_ip",
                "target_field": "geoip",
                "ignore_missing": true
            }
        },
        {
            "user_agent": {
                "field": "user_agent",
                "target_field": "useragent"
            }
        }
    ],
    "on_failure": [{
            "set": {
                "field": "parse_err_message",
                "value": "{{ _ingest.on_failure_message }}"
            }
        },
        {
            "set": {
                "field": "_index",
                "value": "splog-parse-failed"
            }
        }
    ]
}
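
Before pointing Filebeat at the pipeline, you can dry-run it against a sample line with the _simulate API (the message below is illustrative):

POST /_ingest/pipeline/nginxaccess/_simulate
{
    "docs": [{
        "_source": {
            "message": "2023-05-12T10:15:32+08:00 203.0.113.10 - - \"GET /index.html HTTP/1.1\" 200 612 - - 10.0.0.5:8080 0.012 0.015 \"-\" \"Mozilla/5.0\""
        }
    }]
}

The response returns the parsed fields; if the grok pattern does not match, the on_failure handlers above set parse_err_message instead.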

3. Load the custom index template into Elasticsearch

PUT /_template/nginxaccess
{
    "order": 0,
    "version": 1,
    "index_patterns": [
        "nginx-*"
    ],
    "settings": {
        "index": {
            "refresh_interval": "30s",
            "number_of_shards": "1",
            "translog": {
                "sync_interval": "5s",
                "durability": "async"
            },
            "merge": {
                "scheduler": {
                    "max_thread_count": "1"
                }
            },
            "number_of_replicas": "0"
        }
    },
    "mappings": {
            "dynamic_templates": [{
                "strings_as_keywords": {
                    "match_mapping_type": "string",
                    "mapping": {
                        "type": "keyword"
                    }
                }
            }],
            "properties": {
                "status": {
                    "type": "long"
                },
                "bytes": {
                    "type": "long"
                },
                "request_time": {
                    "type": "float"
                },
                "upstream_response_time": {
                    "type": "float"
                },
                "forwardedFor": {
                    "type": "text"
                },
                "geoip" : {
                  "properties" : {
                     "location": {
                       "type": "geo_point"
                      }
                  }
                }
            }
    },
    "aliases": {}
}
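
To confirm that the template was stored:

GET /_template/nginxaccess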

4. Install Filebeat and edit its configuration

Download (past releases): https://www.elastic.co/cn/downloads/past-releases#filebeat

rpm -ivh filebeat-<version>-x86_64.rpm
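
After installing, you can verify the binary (the version printed will match the release you downloaded):

filebeat version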

Edit the startup script (systemd unit)

vi /usr/lib/systemd/system/filebeat.service
[Unit]
Description=Filebeat sends log files to Logstash or directly to Elasticsearch.
Documentation=https://www.elastic.co/products/beats/filebeat
Wants=network-online.target
After=network-online.target

[Service]

Environment="BEAT_LOG_OPTS="
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/filebeat.yml"
Environment="BEAT_PATH_OPTS=-path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat"
ExecStart=/usr/share/filebeat/bin/filebeat $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always

[Install]
WantedBy=multi-user.target
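
After editing the unit file, reload systemd so the change takes effect:

systemctl daemon-reload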

Edit the configuration file

filebeat.inputs:
- type: log
  # Change to true to enable this input configuration.
  enabled: true
  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/nginx/*.log
    #- c:\programdata\elasticsearch\logs\*
  # Exclude lines. A list of regular expressions to match. Filebeat drops lines that
  # match any regular expression in the list.
  #exclude_lines: ['^DBG']
  # Include lines. A list of regular expressions to match. Filebeat exports lines that
  # match any regular expression in the list.
  #include_lines: ['^ERR', '^WARN']
  # Exclude files. A list of regular expressions to match. Filebeat drops files that
  # match any regular expression in the list. By default, no files are dropped.
  #exclude_files: ['.gz$']
  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1
  ### Multiline options
  # Multiline can be used for log messages spanning multiple lines. This is common
  # for Java Stack Traces or C-Line Continuation
  # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
  #multiline.pattern: ^\[
  multiline.pattern: '^\d{4}-\d{2}-\d{2}(\s\d{2}:\d{2}:\d{2})?|^{|^(\d{1,3}\.){3}\d{1,3}|^\d{2}-\d{2}\s+'
  # Defines if the pattern set under pattern should be negated or not. Default is false.
  #multiline.negate: false
  multiline.negate: true
  # Match can be set to "after" or "before". It defines whether lines are appended to a pattern
  # that was (not) matched before or after, or as long as a pattern is not matched, based on negate.
  # Note: "after" is the equivalent of "previous" and "before" is the equivalent of "next" in Logstash
  multiline.match: after
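  # Illustrative example: with the pattern and negate/match settings above, a
  # continuation line that does not start with a timestamp, "{", or an IP address
  # is appended to the preceding matching line and shipped as a single event.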
filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml
  # Set to true to enable config reloading
  reload.enabled: false
  # Period on which files under path should be checked for changes
  #reload.period: 10s
setup.ilm.enabled: false
setup.kibana:
  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # If you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  host: "10.33.200.8:5601"
  username: "elastic"
  password: "xxxxxxxxxxxxxxxxx"
  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["10.33.100.9:9200","10.33.100.14:9200","10.33.100.3:9200"]
  indices:
    - index: "nginx-%{+yyyy.MM.dd}"
  # Protocol - either `http` (default) or `https`.
  #protocol: "https"
  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  username: "elastic"
  password: "xxxxxxxxxxxxxxxx"
  worker: 2
  bulk_max_size: 256
  pipeline: nginxaccess
  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"
  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
monitoring.enabled: true
monitoring.elasticsearch:
  hosts: ["10.33.100.9:9200","10.33.100.14:9200","10.33.100.3:9200"]
  username: "elastic"
  password: "xxxxxxxxxxxxxxxx"

Start the service

systemctl enable filebeat
systemctl restart filebeat
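
Once Filebeat is running, confirm that the daily indices are being created (host and credentials are the ones configured above):

curl -u elastic:xxxxxxxxxxxxxxxx "http://10.33.100.9:9200/_cat/indices/nginx-*?v"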
