filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false

processors:
  - add_cloud_metadata: ~
  - add_docker_metadata: ~

filebeat.inputs:

#------------------------------ Log input --------------------------------
- type: log

  # Change to true to enable this input configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  # To fetch all ".log" files from a specific level of subdirectories,
  # /var/log/*/*.log can be used.
  # For each file found under this path, a harvester is started.
  # Make sure no file is defined twice, as this can lead to unexpected behaviour.
  paths:
    - /var/log/mysql/*.log
    #- c:\programdata\elasticsearch\logs\*
  # Configure the file encoding for reading files with international characters
  # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
  # Some sample encodings:
  #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
  #   hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
  #encoding: plain
  # Exclude lines. A list of regular expressions to match. It drops lines that
  # match any regular expression from the list. include_lines is applied before
  # exclude_lines. By default, no lines are dropped.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports lines that
  # match any regular expression from the list. include_lines is applied before
  # exclude_lines. By default, all lines are exported.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops files
  # that match any regular expression from the list. By default, no files are
  # dropped.
  #exclude_files: ['.gz$']
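  # A sketch of how the two line filters interact (log lines are
  # illustrative): with include_lines: ['^ERR', '^WARN'] and
  # exclude_lines: ['^WARN'], "ERR disk full" is exported, "WARN low disk"
  # matches include_lines first but is then dropped by exclude_lines, and
  # "INFO started" matches neither include pattern, so it is never exported.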
  # Method to determine if two files are the same or not. By default
  # the Beat considers two files the same if their inode and device id are the same.
  #file_identity.native: ~
  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering.
  #fields:
  #  level: debug
  #  review: 1

  # Set to true to store the additional fields as top level fields instead
  # of under the "fields" sub-dictionary. In case of name conflicts with the
  # fields added by Filebeat itself, the custom fields overwrite the default
  # fields.
  #fields_under_root: false

  # Set to true to publish fields with null values in events.
  #keep_null: false
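  # For illustration: with the commented fields above and the default
  # fields_under_root: false, an event carries
  #   "fields": {"level": "debug", "review": 1}
  # With fields_under_root: true, "level" and "review" appear at the top
  # level of the event instead.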
  # By default, all events contain `host.name`. This option can be set to true
  # to disable the addition of this field to all events. The default value is
  # false.
  #publisher_pipeline.disable_host: false

  # Ignore files which were modified more than the defined timespan in the past.
  # ignore_older is disabled by default, so no files are ignored by setting it to 0.
  # Time strings like 2h (2 hours) and 5m (5 minutes) can be used.
  #ignore_older: 0
  # How often the input checks for new files in the paths that are specified
  # for harvesting. Specify 1s to scan the directory as frequently as possible
  # without causing Filebeat to scan too frequently. Default: 10s.
  #scan_frequency: 10s

  # Defines the buffer size every harvester uses when fetching the file.
  #harvester_buffer_size: 16384

  # Maximum number of bytes a single log event can have.
  # All bytes after max_bytes are discarded and not sent. The default is 10MB.
  # This is especially useful for multiline log messages which can get large.
  #max_bytes: 10485760

  # Characters which separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed,
  # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator.
  #line_terminator: auto
  ### Recursive glob configuration

  # Expand "**" patterns into regular glob patterns.
  #recursive_glob.enabled: true
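  # For example, with recursive_glob enabled, a pattern such as
  # /var/log/** is expanded into a fixed number of nested glob patterns
  # (/var/log, /var/log/*, /var/log/*/*, and so on), so a file like
  # /var/log/mysql/slow/queries.log would also be matched.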
  ### JSON configuration

  # Decode JSON options. Enable this if your logs are structured in JSON.

  # JSON key on which to apply the line filtering and multiline settings. This key
  # must be top level and its value must be a string, otherwise it is ignored. If
  # no text key is defined, the line filtering and multiline features cannot be used.
  #json.message_key:

  # By default, the decoded JSON is placed under a "json" key in the output document.
  # If you enable this setting, the keys are copied to the top level of the output document.
  #json.keys_under_root: false

  # If keys_under_root and this setting are enabled, then the values from the decoded
  # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.)
  # in case of conflicts.
  #json.overwrite_keys: false

  # If this setting is enabled, then keys in the decoded JSON object will be recursively
  # de-dotted and expanded into a hierarchical object structure.
  # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`.
  #json.expand_keys: false

  # If this setting is enabled, Filebeat adds "error.message" and "error.key: json"
  # keys in case of JSON unmarshaling errors or when a text key is defined in the
  # configuration but cannot be used.
  #json.add_error_key: false
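  # A minimal sketch of how these options combine (key names are illustrative):
  #   json.message_key: msg
  #   json.keys_under_root: true
  #   json.add_error_key: true
  # With these settings, a line such as {"msg": "timeout", "level": "warn"}
  # is decoded so that "msg" and "level" become top-level event fields, and
  # line filtering and multiline are applied to the value of "msg".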
  ### Multiline options

  # Multiline can be used for log messages spanning multiple lines. This is common
  # for Java stack traces or C line continuations.

  # The regexp pattern that has to be matched. The pattern below matches the
  # "# Time: ..." header line that starts each MySQL slow query log entry.
  multiline.pattern: '^\#[[:space:]]Time'
  multiline.negate: true
  multiline.match: after

  # Defines if the pattern set under pattern should be negated or not. Default is false.
  #multiline.negate: false

  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
  # that was (not) matched before or after, or as long as a pattern is not matched, based on negate.
  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
  #multiline.match: after

  # The maximum number of lines that are combined into one event.
  # In case there are more than max_lines, the additional lines are discarded.
  # Default is 500.
  #multiline.max_lines: 500

  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
  # Default is 5s.
  #multiline.timeout: 5s

  # To aggregate a constant number of lines into a single event, use the count mode of multiline.
  #multiline.type: count

  # The number of lines to aggregate into a single event.
  #multiline.count_lines: 3

  # Do not add a newline character when concatenating lines.
  #multiline.skip_newline: false
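  # A worked example for the active settings above (log content is
  # illustrative): a MySQL slow query log entry starts with a "# Time"
  # header line, e.g.
  #   # Time: 2021-01-01T00:00:00.000000Z
  #   # User@Host: app[app] @ localhost []
  #   # Query_time: 12.3  Lock_time: 0.1
  #   SELECT * FROM orders WHERE ...;
  # With negate: true and match: after, every line that does NOT match
  # '^\#[[:space:]]Time' is appended to the preceding "# Time" line, so
  # each slow query entry becomes a single event.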
  # Setting tail_files to true means filebeat starts reading new files at the end
  # instead of the beginning. If this is used in combination with log rotation,
  # this can mean that the first entries of a new file are skipped.
  #tail_files: false

  # The Ingest Node pipeline ID associated with this input. If this is set, it
  # overwrites the pipeline option from the Elasticsearch output.
  #pipeline:

  # If symlinks is enabled, symlinks are opened and harvested. The harvester opens the
  # original file but reports the symlink name as the source.
  #symlinks: false
  # Backoff values define how aggressively filebeat crawls new files for updates.
  # The default values can be used in most cases. Backoff defines how long Filebeat
  # waits before checking a file again after EOF is reached. The default is 1s, which
  # means the file is checked every second for new lines. This leads to near
  # real-time crawling. Every time a new line appears, backoff is reset to the
  # initial value.
  #backoff: 1s

  # Max backoff defines the maximum backoff time. After backing off multiple times
  # from checking a file, the waiting time will never exceed max_backoff, independent
  # of the backoff factor. With max_backoff set to 10s, it takes at most 10s to read
  # a new line after it is added to the file, no matter how often Filebeat has
  # backed off before.
  #max_backoff: 10s

  # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
  # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
  # The backoff value is multiplied by backoff_factor each time until max_backoff is reached.
  #backoff_factor: 2
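  # With the defaults above (backoff: 1s, backoff_factor: 2, max_backoff: 10s),
  # an idle file is re-checked after 1s, then 2s, 4s, 8s, and from then on
  # every 10s (the cap). As soon as a new line appears, the interval resets
  # to 1s.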
  # Max number of harvesters that are started in parallel.
  # Default is 0 which means unlimited.
  #harvester_limit: 0
  ### Harvester closing options

  # Close inactive closes the file handler after the predefined period.
  # The period starts when the last line of the file was read, not from the
  # file's modification time.
  # Time strings like 2h (2 hours) and 5m (5 minutes) can be used.
  #close_inactive: 5m

  # Close renamed closes a file handler when the file is renamed or rotated.
  # Note: Potential data loss. Make sure to read and understand the docs for this option.
  #close_renamed: false

  # When enabling this option, a file handler is closed immediately in case a file can't be found
  # any more. In case the file shows up again later, harvesting will continue at the last known position
  # after scan_frequency.
  #close_removed: true

  # Closes the file handler as soon as the harvester reaches the end of the file.
  # By default this option is disabled.
  # Note: Potential data loss. Make sure to read and understand the docs for this option.
  #close_eof: false
  ### State options

  # If a file's state has not been updated for longer than clean_inactive,
  # the state is removed from the registry.
  # By default this is disabled.
  #clean_inactive: 0

  # Removes the state for files which can no longer be found on disk immediately.
  #clean_removed: true

  # Close timeout closes the harvester after the predefined time,
  # regardless of whether the harvester has finished reading the file.
  # By default this option is disabled.
  # Note: Potential data loss. Make sure to read and understand the docs for this option.
  #close_timeout: 0
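  # Note: per the Filebeat docs, clean_inactive must be greater than
  # ignore_older + scan_frequency; otherwise the state of a file that is
  # still ignored may be removed, and the file would be read again from
  # the beginning.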
output.logstash:
  hosts: ["graylog:5044"]
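# Graylog's Beats input speaks the same protocol as Logstash's beats plugin,
# so output.logstash works against it unchanged. A sketch with multiple
# Graylog nodes (hostnames are placeholders):
#output.logstash:
#  hosts: ["graylog1:5044", "graylog2:5044"]
#  loadbalance: true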