#======================== Journalbeat Configuration ============================

journalbeat:
  # What position in journald to seek to at start up
  # options: cursor, tail, head (defaults to tail)
  #seek_position: tail

  # If seek_position is set to cursor and seeking to cursor fails,
  # fall back to this method. If set to none, it will exit.
  # options: tail, head, none (defaults to tail)
  #cursor_seek_fallback: tail
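
  # A minimal illustrative combination (not an active default): resume from the
  # last saved cursor, and re-read the journal from the beginning if that
  # cursor turns out to be missing or invalid:
  #seek_position: cursor
  #cursor_seek_fallback: head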

  # Store the cursor of the successfully published events
  #write_cursor_state: true

  # Path to the file to store the cursor (defaults to ".journalbeat-cursor-state")
  #cursor_state_file: .journalbeat-cursor-state

  # How frequently should we save the cursor to disk (defaults to 5s)
  #cursor_flush_period: 5s

  # Path to the file to store the queue of pending events (defaults to ".journalbeat-pending-queue")
  #pending_queue.file: .journalbeat-pending-queue

  # How frequently should we save the queue to disk (defaults to 1s).
  # The pending queue is the WAL of events that are queued to be published,
  # or are being published and are waiting for acknowledgement. On a regular
  # restart of journalbeat, all events not yet acknowledged are flushed to
  # disk during shutdown. In a crash, journalbeat most likely won't get the
  # chance to shut down gracefully, so this flush period effectively controls
  # how often a backup of the queue is written.
  #pending_queue.flush_period: 1s
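
  # An illustrative tweak (assumption: sub-second Go-style durations such as
  # "500ms" are accepted here): flush the pending queue more often on hosts
  # where losing events across a crash is costly:
  #pending_queue.flush_period: 500ms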

  # Lowercase and remove leading underscores, e.g. "_MESSAGE" -> "message"
  # (defaults to false)
  #clean_field_names: false

  # All journal entries are strings by default. You can try to convert them to numbers.
  # (defaults to false)
  #convert_to_numbers: false
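
  # For example (illustrative), to get "message" instead of "_MESSAGE" and
  # numeric values where the string content allows it:
  #clean_field_names: true
  #convert_to_numbers: true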

  # Store all the fields of the systemd journal entry under this field.
  # Can be almost any string suitable as a field name of an Elasticsearch document.
  # Dots can be used to create nested fields.
  # Two exceptions:
  # - no repeated dots;
  # - no trailing dots, e.g. "journal..field_name." will fail
  # (defaults to "", hence stores on the top level of the event)
  #move_metadata_to_field: ""
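
  # For example (illustrative), to nest all journal metadata under "journal.meta":
  #move_metadata_to_field: "journal.meta"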

  # Specific units to monitor.
  units: ["{{service}}.service"]
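
  # For example (illustrative; unit names are placeholders), to watch several
  # units at once:
  #units: ["nginx.service", "sshd.service"]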

  # Specify journal paths to open. You can pass an array of paths to systemd journal files.
  # If you want to open a journal from a directory, pass an array consisting of a single
  # element with that directory's path. See: https://www.freedesktop.org/software/systemd/man/sd_journal_open.html
  # By default this setting is empty, so journalbeat will attempt to find all journal files automatically.
  #journal_paths: ["/var/log/journal"]
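
  # For example (illustrative, assuming both locations exist on the host), to
  # read the persistent journal as well as the volatile runtime journal:
  #journal_paths: ["/var/log/journal", "/run/log/journal"]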

  #default_type: journal

#================================ General ======================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
# If this option is not defined, the hostname is used.
#name: journalbeat

# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
tags: ["{{service}}"]

# Optional fields that you can specify to add additional information to the
# output. Fields can be scalar values, arrays, dictionaries, or any nested
# combination of these.
fields:
  logzio_codec: plain
  token: {{LOGZIO_TOKEN}}

# If this option is set to true, the custom fields are stored as top-level
# fields in the output document instead of being grouped under a fields
# sub-dictionary. Default is false.
fields_under_root: true
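
# For example (illustrative), with fields_under_root: true an event looks like
#   {"logzio_codec": "plain", "token": "...", ...}
# instead of
#   {"fields": {"logzio_codec": "plain", "token": "..."}, ...}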

# Internal queue size for single events in the processing pipeline
#queue_size: 1000

# The internal queue size for bulk events in the processing pipeline.
# Do not modify this value.
#bulk_queue_size: 0

# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:

#================================ Processors ===================================

# Processors are used to reduce the number of fields in the exported event or to
# enhance the event with external metadata. This section defines a list of
# processors that are applied one by one and the first one receives the initial
# event:
#
#   event -> filter1 -> event1 -> filter2 -> event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields, and
# add_cloud_metadata.
#
# For example, you can use the following processors to keep the fields that
# contain CPU load percentages, but remove the fields that contain CPU ticks
# values:
#
processors:
#- include_fields:
#    fields: ["cpu"]
- drop_fields:
    fields: ["beat.name", "beat.version", "logzio_codec", "SYSLOG_IDENTIFIER", "SYSLOG_FACILITY", "PRIORITY"]
#
# The following example drops the events that have the HTTP response code 200:
#
#processors:
#- drop_event:
#    when:
#      equals:
#        http.code: 200
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, and DigitalOcean.
#
#processors:
#- add_cloud_metadata:
#

#================================ Outputs ======================================

# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.

#----------------------------- Logstash output ---------------------------------
output.logstash:
  # Boolean flag to enable or disable the output module.
  enabled: true

  # The Logstash hosts
  hosts: ["listener.logz.io:5015"]

  # Number of workers per Logstash host.
  #worker: 1

  # Set gzip compression level.
  #compression_level: 3

  # Optionally load-balance the events between the Logstash hosts.
  #loadbalance: true
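
  # For example (illustrative; host names are placeholders), to balance events
  # across two Logstash endpoints:
  #hosts: ["logstash-1.example.com:5044", "logstash-2.example.com:5044"]
  #loadbalance: true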

  # Number of batches to be sent asynchronously to Logstash while processing
  # new batches.
  #pipelining: 0

  # Optional index name. The default index name is set to the name of the beat
  # in all lowercase.
  #index: 'beatname'

  # SOCKS5 proxy server URL
  #proxy_url: socks5://user:password@socks5-server:2233

  # Resolve names locally when using a proxy server. Defaults to false.
  #proxy_use_local_resolver: false

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL-based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
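
  # For example (illustrative), to refuse anything older than TLS 1.2:
  #ssl.supported_protocols: [TLSv1.2]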

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verification
  ssl.certificate_authorities: ["/etc/pki/tls/certs/COMODORSADomainValidationSecureServerCA.crt"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"
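
  # For example (illustrative; paths are placeholders): to present a client
  # certificate for mutual TLS, both options must be set together:
  #ssl.certificate: "/etc/pki/client/cert.pem"
  #ssl.key: "/etc/pki/client/cert.key"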

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

#------------------------------- File output -----------------------------------
#output.file:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Path to the directory where to save the generated files. The option is
  # mandatory.
  #path: "/tmp/beatname"

  # Name of the generated files. The default is `beatname` and it generates
  # files: `beatname`, `beatname.1`, `beatname.2`, etc.
  #filename: beatname

  # Maximum size in kilobytes of each file. When this size is reached, and on
  # every beatname restart, the files are rotated. The default value is 10240
  # kB.
  #rotate_every_kb: 10000

  # Maximum number of files under path. When this number of files is reached,
  # the oldest file is deleted and the rest are shifted from last to first. The
  # default is 7 files.
  #number_of_files: 7
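
# For example (illustrative), to dump events to local files while debugging:
#output.file:
  #enabled: true
  #path: "/tmp/journalbeat"
  #filename: journalbeat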

#----------------------------- Console output ---------------------------------
#output.console:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Pretty-print JSON events
  #pretty: false

#================================= Paths ======================================

# The home path for the beatname installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:

# The configuration path for the beatname installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}

# The data path for the beatname installation. This is the default base path
# for all the files in which beatname needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data

# The logs path for a beatname installation. This is the default location for
# the Beat's log files. If not set by a CLI flag or in the configuration file,
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs

#============================== Dashboards =====================================
# These settings control loading the sample dashboards into the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag.
#dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#dashboards.url:

# The directory from where to read the dashboards. It is used instead of the URL
# when it has a value.
#dashboards.directory:

# The file archive (zip file) from where to read the dashboards. It is used instead
# of the URL when it has a value.
#dashboards.file:

# If this option is enabled, the snapshot URL is used instead of the default URL.
#dashboards.snapshot: false

# The URL from where to download the snapshot version of the dashboards. By default
# this has a value which is computed based on the Beat name and version.
#dashboards.snapshot_url:

# In case the archive contains the dashboards from multiple Beats, this lets you
# select which one to load. You can load all the dashboards in the archive by
# setting this to the empty string.
#dashboards.beat: beatname

# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
#dashboards.kibana_index: .kibana

# The Elasticsearch index name. This overwrites the index name defined in the
# dashboards and index pattern. Example: testbeat-*
#dashboards.index:

#================================ Logging ======================================
# There are three options for the log output: syslog, file, stderr.
# On Windows systems, the logs are written to the file output by default;
# on all other systems, they go to syslog by default.

# Sets the log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
#logging.level: info

# Enable debug output for selected components. To enable all selectors use ["*"].
# Other available selectors are "beat", "publish", "service".
# Multiple selectors can be chained.
#logging.selectors: [ ]
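
# For example (illustrative), to debug only the publisher pipeline:
#logging.level: debug
#logging.selectors: ["publish"]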

# Send all logging output to syslog. The default is false.
#logging.to_syslog: true

# If enabled, beatname periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true

# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s

# Logging to rotating files. Set logging.to_files to false to disable logging to
# files.
logging.to_files: true
logging.files:
  # Configure the path where the logs are written. The default is the logs directory
  # under the home path (the binary location).
  #path: /var/log/beatname

  # The name of the files where the logs are written to.
  #name: beatname

  # Configure log file size limit. If the limit is reached, the log file will be
  # automatically rotated.
  #rotateeverybytes: 10485760 # = 10MB

  # Number of rotated log files to keep. Oldest files will be deleted first.
  #keepfiles: 7
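
  # For example (illustrative), a complete rotation setup writing to a dedicated
  # directory and keeping two weeks of history at ~10MB per file:
  #path: /var/log/journalbeat
  #name: journalbeat
  #rotateeverybytes: 10485760
  #keepfiles: 14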