#======================== Journalbeat Configuration ============================
journalbeat:
  # What position in journald to seek to at start up
  # options: cursor, tail, head (defaults to tail)
  #seek_position: tail

  # If seek_position is set to cursor and seeking to cursor fails
  # fall back to this method. If set to none it will exit
  # options: tail, head, none (defaults to tail)
  #cursor_seek_fallback: tail

  # Store the cursor of the successfully published events
  #write_cursor_state: true

  # Path to the file to store the cursor (defaults to ".journalbeat-cursor-state")
  #cursor_state_file: .journalbeat-cursor-state

  # How frequently should we save the cursor to disk (defaults to 5s)
  #cursor_flush_period: 5s

  # Path to the file to store the queue of pending events (defaults to ".journalbeat-pending-queue")
  #pending_queue.file: .journalbeat-pending-queue

  # How frequently should we save the queue to disk (defaults to 1s).
  # The pending queue represents the WAL of events queued to be published
  # or being published and waiting for acknowledgement. In case of a
  # regular restart of journalbeat, all the events not yet acknowledged
  # will be flushed to disk during the shutdown.
  # In case of a disaster journalbeat most probably won't get a chance to shut
  # down gracefully, so this flush period option serves as a
  # backup creation frequency option.
  #pending_queue.flush_period: 1s

  # Lowercase and remove leading underscores, e.g. "_MESSAGE" -> "message"
  # (defaults to false)
  #clean_field_names: false

  # All journal entries are strings by default. You can try to convert them to numbers.
  # (defaults to false)
  #convert_to_numbers: false

  # Store all the fields of the Systemd Journal entry under this field.
  # Can be almost any string suitable to be a field name of an Elasticsearch document.
  # Dots can be used to create nested fields.
  # Two exceptions:
  # - no repeated dots;
  # - no trailing dots, e.g. "journal..field_name." will fail
  # (defaults to "", hence stores at the top level of the event)
  #move_metadata_to_field: ""
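  # Example (an illustrative value, not a shipped default): store the journal
  # metadata under a nested "journal.meta" field:
  #move_metadata_to_field: "journal.meta"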
  # Specific units to monitor.
  units: ["{{service}}.service"]

  # Specify Journal paths to open. You can pass an array of paths to Systemd Journal paths.
  # If you want to open the Journal from a directory, just pass an array consisting of one
  # element representing the path. See: https://www.freedesktop.org/software/systemd/man/sd_journal_open.html
  # By default this setting is empty, thus journalbeat will attempt to find all journal files automatically.
  #journal_paths: ["/var/log/journal"]

  #default_type: journal
#================================ General ======================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
# If this option is not defined, the hostname is used.
#name: journalbeat

# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
tags: ["{{service}}"]

# Optional fields that you can specify to add additional information to the
# output. Fields can be scalar values, arrays, dictionaries, or any nested
# combination of these.
fields:
  logzio_codec: plain
  token: {{LOGZIO_TOKEN}}

# If this option is set to true, the custom fields are stored as top-level
# fields in the output document instead of being grouped under a fields
# sub-dictionary. Default is false.
fields_under_root: true

# Internal queue size for single events in the processing pipeline
#queue_size: 1000

# The internal queue size for bulk events in the processing pipeline.
# Do not modify this value.
#bulk_queue_size: 0

# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:
#================================ Processors ===================================
# Processors are used to reduce the number of fields in the exported event or to
# enhance the event with external metadata. This section defines a list of
# processors that are applied one by one and the first one receives the initial
# event:
#
#   event -> filter1 -> event1 -> filter2 -> event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields, and
# add_cloud_metadata.
#
# For example, you can use the following processors to keep the fields that
# contain CPU load percentages, but remove the fields that contain CPU ticks
# values:
#
processors:
#- include_fields:
#    fields: ["cpu"]
- drop_fields:
    fields: ["beat.name", "beat.version", "logzio_codec", "SYSLOG_IDENTIFIER", "SYSLOG_FACILITY", "PRIORITY"]
#
# The following example drops the events that have the HTTP response code 200:
#
#processors:
#- drop_event:
#    when:
#      equals:
#        http.code: 200
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, and DigitalOcean.
#
#processors:
#- add_cloud_metadata:
#
#================================ Outputs ======================================
# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.

#----------------------------- Logstash output ---------------------------------
output.logstash:
  # Boolean flag to enable or disable the output module.
  enabled: true

  # The Logstash hosts
  hosts: ["listener.logz.io:5015"]

  # Number of workers per Logstash host.
  #worker: 1

  # Set gzip compression level.
  #compression_level: 3

  # Optionally load balance the events between the Logstash hosts.
  #loadbalance: true
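  # Example (illustrative only; the second listener host is a placeholder, not a
  # real Logz.io endpoint): balance events across several Logstash hosts by
  # listing them all and enabling loadbalance:
  #hosts: ["listener.logz.io:5015", "listener-2.logz.io:5015"]
  #loadbalance: true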
  # Number of batches to be sent asynchronously to Logstash while processing
  # new batches.
  #pipelining: 0

  # Optional index name. The default index name is set to the name of the beat
  # in all lowercase.
  #index: 'beatname'

  # SOCKS5 proxy server URL
  #proxy_url: socks5://user:password@socks5-server:2233

  # Resolve names locally when using a proxy server. Defaults to false.
  #proxy_use_local_resolver: false

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL-based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up
  # to 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  ssl.certificate_authorities: ["/etc/pki/tls/certs/COMODORSADomainValidationSecureServerCA.crt"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []
#------------------------------- File output -----------------------------------
#output.file:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Path to the directory where to save the generated files. The option is
  # mandatory.
  #path: "/tmp/beatname"

  # Name of the generated files. The default is `beatname` and it generates
  # files: `beatname`, `beatname.1`, `beatname.2`, etc.
  #filename: beatname

  # Maximum size in kilobytes of each file. When this size is reached, and on
  # every beatname restart, the files are rotated. The default value is 10240
  # kB.
  #rotate_every_kb: 10000

  # Maximum number of files under path. When this number of files is reached,
  # the oldest file is deleted and the rest are shifted from last to first. The
  # default is 7 files.
  #number_of_files: 7

#----------------------------- Console output ---------------------------------
#output.console:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Pretty-print JSON events
  #pretty: false
#================================= Paths ======================================
# The home path for the beatname installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:

# The configuration path for the beatname installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}

# The data path for the beatname installation. This is the default base path
# for all the files in which beatname needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data

# The logs path for a beatname installation. This is the default location for
# the Beat's log files. If not set by a CLI flag or in the configuration file,
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs
#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag.
#dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#dashboards.url:

# The directory from where to read the dashboards. It is used instead of the URL
# when it has a value.
#dashboards.directory:

# The file archive (zip file) from where to read the dashboards. It is used instead
# of the URL when it has a value.
#dashboards.file:

# If this option is enabled, the snapshot URL is used instead of the default URL.
#dashboards.snapshot: false

# The URL from where to download the snapshot version of the dashboards. By default
# this has a value which is computed based on the Beat name and version.
#dashboards.snapshot_url:

# In case the archive contains the dashboards from multiple Beats, this lets you
# select which one to load. You can load all the dashboards in the archive by
# setting this to the empty string.
#dashboards.beat: beatname

# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
#dashboards.kibana_index: .kibana

# The Elasticsearch index name. This overwrites the index name defined in the
# dashboards and index pattern. Example: testbeat-*
#dashboards.index:
#================================ Logging ======================================
# There are three options for the log output: syslog, file, stderr.
# On Windows systems, the logs are sent to the file output by default;
# on all other systems they are sent to syslog by default.

# Sets log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
#logging.level: info

# Enable debug output for selected components. To enable all selectors use ["*"].
# Other available selectors are "beat", "publish", "service".
# Multiple selectors can be chained.
#logging.selectors: [ ]
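# Example (illustrative only): enable debug output for the beat core and the
# publisher pipeline:
#logging.selectors: ["beat", "publish"]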
# Send all logging output to syslog. The default is false.
#logging.to_syslog: true

# If enabled, beatname periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true

# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s

# Logging to rotating files. Set logging.to_files to false to disable logging to
# files.
logging.to_files: true
logging.files:
  # Configure the path where the logs are written. The default is the logs directory
  # under the home path (the binary location).
  #path: /var/log/beatname

  # The name of the files where the logs are written to.
  #name: beatname

  # Configure the log file size limit. If the limit is reached, the log file will be
  # rotated automatically.
  #rotateeverybytes: 10485760 # = 10MB

  # Number of rotated log files to keep. Oldest files will be deleted first.
  #keepfiles: 7