From f580c9149cd1afcacbaa21be8123c6c36f2c967e Mon Sep 17 00:00:00 2001 From: Chris Cowan Date: Fri, 4 Jun 2021 03:07:59 -0700 Subject: [PATCH] Elasticsearch: Add Top Metrics Aggregation and X-Pack support (#33041) * Elasticsearch: Add Top Metrics Aggregation * Adding support for non-timeseries visualizations * removing console.logs * restoring loadOptions type * Honor xpack setting * Adding test for elastic_response * adding test for query builder * Adding support of alerting * Fixing separator spelling * Fixing linting issues * attempting to reduce cyclomatic complexity * Adding elastic77 Docker block * Update public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/MetricEditor.test.tsx Co-authored-by: Giordano Ricci * refactoring MetricsEditor tests * Fixing typo * Change getFields type & move TopMetrics to a separate component * Fix SegmentAsync styles in TopMetrics Settings * Fix field types for TopMetrics * WIP * Refactoring client side to support multiple top metrics * Adding tests and finishing go implimentation * removing fmt lib from debugging * fixing tests * reducing the cyclomatic complexity * Update public/app/plugins/datasource/elasticsearch/elastic_response.ts Co-authored-by: Giordano Ricci * Update public/app/plugins/datasource/elasticsearch/hooks/useFields.ts Co-authored-by: Giordano Ricci * Checking for possible nil value * Fixing types * fix fake-data-gen param * fix useFields hook * Removing aggregateBy and size * Fixing go tests * Fixing TS tests * fixing tests * Fixes * Remove date from top_metrics fields * Restore previous formatting * Update pkg/tsdb/elasticsearch/client/models.go Co-authored-by: Dimitris Sotirakis * Update pkg/tsdb/elasticsearch/client/models.go Co-authored-by: Dimitris Sotirakis * Fix code review comments on processTopMetricValue * Remove underscore from variable names * Remove intermediate array definition * Refactor test to use testify Co-authored-by: Giordano Ricci Co-authored-by: Elfo404 Co-authored-by: Dimitris Sotirakis --- .../blocks/elastic77/docker-compose.yaml | 44 + .../docker/blocks/elastic77/elasticsearch.yml | 3 + devenv/docker/blocks/elastic77/filebeat.yml | 1978 +++++++++++++++++ devenv/docker/blocks/elastic77/metricbeat.yml | 38 + docs/sources/datasources/elasticsearch.md | 2 +- pkg/tsdb/elasticsearch/client/client.go | 2 + pkg/tsdb/elasticsearch/client/models.go | 29 + .../elasticsearch/client/search_request.go | 2 + pkg/tsdb/elasticsearch/models.go | 1 + pkg/tsdb/elasticsearch/response_parser.go | 49 + .../elasticsearch/response_parser_test.go | 75 + .../BucketAggregationsEditor/aggregations.ts | 11 + .../BucketAggregationsEditor/utils.ts | 22 +- .../MetricEditor.test.tsx | 50 +- .../MetricAggregationsEditor/MetricEditor.tsx | 7 +- .../TopMetricsSettingsEditor.tsx | 69 + .../SettingsEditor/index.tsx | 3 + .../MetricAggregationsEditor/aggregations.ts | 18 +- .../MetricAggregationsEditor/utils.ts | 17 + .../QueryEditor/SettingsEditorContainer.tsx | 1 + .../configuration/ElasticDetails.tsx | 25 +- .../elasticsearch/datasource.test.ts | 29 +- .../datasource/elasticsearch/datasource.ts | 37 +- .../elasticsearch/elastic_response.ts | 64 +- .../elasticsearch/hooks/useFields.test.tsx | 15 +- .../elasticsearch/hooks/useFields.ts | 65 +- .../datasource/elasticsearch/query_builder.ts | 69 +- .../specs/elastic_response.test.ts | 69 + .../elasticsearch/specs/query_builder.test.ts | 33 +- .../plugins/datasource/elasticsearch/types.ts | 2 + 30 files changed, 2718 insertions(+), 111 deletions(-) create mode 
100644 devenv/docker/blocks/elastic77/docker-compose.yaml create mode 100644 devenv/docker/blocks/elastic77/elasticsearch.yml create mode 100644 devenv/docker/blocks/elastic77/filebeat.yml create mode 100644 devenv/docker/blocks/elastic77/metricbeat.yml create mode 100644 public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/SettingsEditor/TopMetricsSettingsEditor.tsx diff --git a/devenv/docker/blocks/elastic77/docker-compose.yaml b/devenv/docker/blocks/elastic77/docker-compose.yaml new file mode 100644 index 00000000000..1b905544866 --- /dev/null +++ b/devenv/docker/blocks/elastic77/docker-compose.yaml @@ -0,0 +1,44 @@ +# You need to run 'sysctl -w vm.max_map_count=262144' on the host machine + + elasticsearch77: + image: docker.elastic.co/elasticsearch/elasticsearch:7.7.1 + command: elasticsearch + environment: + - "discovery.type=single-node" + ports: + - "13200:9200" + - "13300:9300" + + fake-elastic77-data: + image: grafana/fake-data-gen + links: + - elasticsearch77 + environment: + FD_SERVER: elasticsearch77 + FD_DATASOURCE: elasticsearch7 + FD_PORT: 9200 + + filebeat77: + image: docker.elastic.co/beats/filebeat:7.7.1 + command: filebeat -e -strict.perms=false + volumes: + - ./docker/blocks/elastic77/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro + - /var/log:/var/log:ro + - ../data/log:/var/log/grafana:ro + + metricbeat77: + image: docker.elastic.co/beats/metricbeat:7.7.1 + command: metricbeat -e -strict.perms=false + user: root + volumes: + - ./docker/blocks/elastic77/metricbeat.yml:/usr/share/metricbeat/metricbeat.yml:ro + - /var/run/docker.sock:/var/run/docker.sock:ro + + kibana77: + image: docker.elastic.co/kibana/kibana:7.7.1 + ports: + - "5601:5601" + links: + - elasticsearch77 + environment: + ELASTICSEARCH_HOSTS: http://elasticsearch77:9200 diff --git a/devenv/docker/blocks/elastic77/elasticsearch.yml b/devenv/docker/blocks/elastic77/elasticsearch.yml new file mode 100644 index 00000000000..0061758eb58 --- /dev/null +++ b/devenv/docker/blocks/elastic77/elasticsearch.yml @@ -0,0 +1,3 @@ +script.inline: on +script.indexed: on +xpack.license.self_generated.type: basic diff --git a/devenv/docker/blocks/elastic77/filebeat.yml b/devenv/docker/blocks/elastic77/filebeat.yml new file mode 100644 index 00000000000..d82d90dcec3 --- /dev/null +++ b/devenv/docker/blocks/elastic77/filebeat.yml @@ -0,0 +1,1978 @@ +######################## Filebeat Configuration ############################ + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see filebeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/filebeat/index.html + + +#========================== Modules configuration ============================ +filebeat.modules: + +#------------------------------- System Module ------------------------------- +#- module: system + # Syslog + #syslog: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Authorization logs + #auth: + #enabled: true + + # Set custom paths for the log files. 
If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#------------------------------- Apache2 Module ------------------------------ +#- module: apache2 + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#------------------------------- Auditd Module ------------------------------- +#- module: auditd + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#---------------------------- elasticsearch Module --------------------------- +- module: elasticsearch + # Server log + server: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + + gc: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + audit: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + + slowlog: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + + deprecation: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + + +#------------------------------- haproxy Module ------------------------------ +- module: haproxy + # All logs + log: + enabled: true + + # Set which input to use between syslog (default) or file. + #var.input: + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#------------------------------- Icinga Module ------------------------------- +#- module: icinga + # Main logs + #main: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Debug logs + #debug: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. 
+ #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Startup logs + #startup: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#--------------------------------- IIS Module -------------------------------- +#- module: iis + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#-------------------------------- Kafka Module ------------------------------- +- module: kafka + # All logs + log: + enabled: true + + # Set custom paths for Kafka. If left empty, + # Filebeat will look under /opt. + #var.kafka_home: + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + +#------------------------------- kibana Module ------------------------------- +- module: kibana + # All logs + log: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#------------------------------ logstash Module ------------------------------ +#- module: logstash + # logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + # var.paths: + + # Slow logs + #slowlog: + #enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#------------------------------- mongodb Module ------------------------------ +#- module: mongodb + # Logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#-------------------------------- MySQL Module ------------------------------- +#- module: mysql + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Slow logs + #slowlog: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#-------------------------------- Nginx Module ------------------------------- +#- module: nginx + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. 
If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + +#------------------------------- Osquery Module ------------------------------ +- module: osquery + result: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # If true, all fields created by this module are prefixed with + # `osquery.result`. Set to false to copy the fields in the root + # of the document. The default is true. + #var.use_namespace: true + +#----------------------------- PostgreSQL Module ----------------------------- +#- module: postgresql + # Logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#-------------------------------- Redis Module ------------------------------- +#- module: redis + # Main logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: ["/var/log/redis/redis-server.log*"] + + # Slow logs, retrieved via the Redis API (SLOWLOG) + #slowlog: + #enabled: true + + # The Redis hosts to connect to. + #var.hosts: ["localhost:6379"] + + # Optional, the password to use when connecting to Redis. + #var.password: + +#------------------------------- Traefik Module ------------------------------ +#- module: traefik + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + +#=========================== Filebeat inputs ============================= + +# List of inputs to fetch data. +filebeat.inputs: +# Each - is an input. Most options can be set at the input level, so +# you can use different inputs for various configurations. +# Below are the input specific configurations. + +# Type of the files. Based on this the way the file is read is decided. 
+# The different types cannot be mixed in one input +# +# Possible options are: +# * log: Reads every line of the log file (default) +# * stdin: Reads the standard in + +#------------------------------ Log input -------------------------------- +- type: log + enabled: false + paths: + - /var/log/*.log +- type: log + enabled: true + paths: + - /var/log/grafana/grafana.log + include_lines: ['lvl=info'] + fields: + app: grafana + level: info +- type: log + enabled: true + paths: + - /var/log/grafana/grafana.log + include_lines: ['lvl=eror'] + fields: + app: grafana + level: error +- type: log + enabled: true + paths: + - /var/log/grafana/grafana.log + include_lines: ['lvl=warn'] + fields: + app: grafana + level: warning +- type: log + enabled: true + paths: + - /var/log/grafana/grafana.log + include_lines: ['lvl=dbug'] + fields: + app: grafana + level: debug + +#- type: log + + # Change to true to enable this input configuration. + #enabled: false + + # Paths that should be crawled and fetched. Glob based paths. + # To fetch all ".log" files from a specific level of subdirectories + # /var/log/*/*.log can be used. + # For each file found under this path, a harvester is started. + # Make sure not file is defined twice as this can lead to unexpected behaviour. + #paths: + #- /var/log/*.log + #- c:\programdata\elasticsearch\logs\* + + # Configure the file encoding for reading files with international characters + # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). + # Some sample encodings: + # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, + # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... + #encoding: plain + + + # Exclude lines. A list of regular expressions to match. It drops the lines that are + # matching any regular expression from the list. The include_lines is called before + # exclude_lines. By default, no lines are dropped. + #exclude_lines: ['^DBG'] + + # Include lines. A list of regular expressions to match. It exports the lines that are + # matching any regular expression from the list. The include_lines is called before + # exclude_lines. By default, all the lines are exported. + #include_lines: ['^ERR', '^WARN'] + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # are matching any regular expression from the list. By default, no files are dropped. + #exclude_files: ['.gz$'] + + # Optional additional fields. These fields can be freely picked + # to add additional information to the crawled log files for filtering + #fields: + # level: debug + # review: 1 + + # Set to true to store the additional fields as top level fields instead + # of under the "fields" sub-dictionary. In case of name conflicts with the + # fields added by Filebeat itself, the custom fields overwrite the default + # fields. + #fields_under_root: false + + # Ignore files which were modified more then the defined timespan in the past. + # ignore_older is disabled by default, so no files are ignored by setting it to 0. + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + #ignore_older: 0 + + # How often the input checks for new files in the paths that are specified + # for harvesting. Specify 1s to scan the directory as frequently as possible + # without causing Filebeat to scan too frequently. Default: 10s. 
+ #scan_frequency: 10s + + # Defines the buffer size every harvester uses when fetching the file + #harvester_buffer_size: 16384 + + # Maximum number of bytes a single log event can have + # All bytes after max_bytes are discarded and not sent. The default is 10MB. + # This is especially useful for multiline log messages which can get large. + #max_bytes: 10485760 + + ### Recursive glob configuration + + # Expand "**" patterns into regular glob patterns. + #recursive_glob.enabled: true + + ### JSON configuration + + # Decode JSON options. Enable this if your logs are structured in JSON. + # JSON key on which to apply the line filtering and multiline settings. This key + # must be top level and its value must be string, otherwise it is ignored. If + # no text key is defined, the line filtering and multiline features cannot be used. + #json.message_key: + + # By default, the decoded JSON is placed under a "json" key in the output document. + # If you enable this setting, the keys are copied top level in the output document. + #json.keys_under_root: false + + # If keys_under_root and this setting are enabled, then the values from the decoded + # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) + # in case of conflicts. + #json.overwrite_keys: false + + # If this setting is enabled, Filebeat adds a "error.message" and "error.key: json" key in case of JSON + # unmarshaling errors or when a text key is defined in the configuration but cannot + # be used. + #json.add_error_key: false + + ### Multiline options + + # Multiline can be used for log messages spanning multiple lines. This is common + # for Java Stack Traces or C-Line Continuation + + # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ + #multiline.pattern: ^\[ + + # Defines if the pattern set under pattern should be negated or not. Default is false. + #multiline.negate: false + + # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern + # that was (not) matched before or after or as long as a pattern is not matched based on negate. + # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash + #multiline.match: after + + # The maximum number of lines that are combined to one event. + # In case there are more the max_lines the additional lines are discarded. + # Default is 500 + #multiline.max_lines: 500 + + # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event + # Default is 5s. + #multiline.timeout: 5s + + # Setting tail_files to true means filebeat starts reading new files at the end + # instead of the beginning. If this is used in combination with log rotation + # this can mean that the first entries of a new file are skipped. + #tail_files: false + + # The Ingest Node pipeline ID associated with this input. If this is set, it + # overwrites the pipeline option from the Elasticsearch output. + #pipeline: + + # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the + # original for harvesting but will report the symlink name as source. + #symlinks: false + + # Backoff values define how aggressively filebeat crawls new files for updates + # The default values can be used in most cases. Backoff defines how long it is waited + # to check a file again after EOF is reached. Default is 1s which means the file + # is checked every second if new lines were added. 
This leads to a near real time crawling. + # Every time a new line appears, backoff is reset to the initial value. + #backoff: 1s + + # Max backoff defines what the maximum backoff time is. After having backed off multiple times + # from checking the files, the waiting time will never exceed max_backoff independent of the + # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log + # file after having backed off multiple times, it takes a maximum of 10s to read the new line + #max_backoff: 10s + + # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, + # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. + # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached + #backoff_factor: 2 + + # Max number of harvesters that are started in parallel. + # Default is 0 which means unlimited + #harvester_limit: 0 + + ### Harvester closing options + + # Close inactive closes the file handler after the predefined period. + # The period starts when the last line of the file was, not the file ModTime. + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + #close_inactive: 5m + + # Close renamed closes a file handler when the file is renamed or rotated. + # Note: Potential data loss. Make sure to read and understand the docs for this option. + #close_renamed: false + + # When enabling this option, a file handler is closed immediately in case a file can't be found + # any more. In case the file shows up again later, harvesting will continue at the last known position + # after scan_frequency. + #close_removed: true + + # Closes the file handler as soon as the harvesters reaches the end of the file. + # By default this option is disabled. + # Note: Potential data loss. Make sure to read and understand the docs for this option. + #close_eof: false + + ### State options + + # Files for the modification data is older then clean_inactive the state from the registry is removed + # By default this is disabled. + #clean_inactive: 0 + + # Removes the state for file which cannot be found on disk anymore immediately + #clean_removed: true + + # Close timeout closes the harvester after the predefined time. + # This is independent if the harvester did finish reading the file or not. + # By default this option is disabled. + # Note: Potential data loss. Make sure to read and understand the docs for this option. + #close_timeout: 0 + + # Defines if inputs is enabled + #enabled: true + +#----------------------------- Stdin input ------------------------------- +# Configuration to use stdin input +#- type: stdin + +#------------------------- Redis slowlog input --------------------------- +# Experimental: Config options for the redis slow log input +#- type: redis + #enabled: false + + # List of hosts to pool to retrieve the slow log information. + #hosts: ["localhost:6379"] + + # How often the input checks for redis slow log. + #scan_frequency: 10s + + # Timeout after which time the input should return an error + #timeout: 1s + + # Network type to be used for redis connection. Default: tcp + #network: tcp + + # Max number of concurrent connections. Default: 10 + #maxconn: 10 + + # Redis AUTH password. Empty by default. 
+ #password: foobared + +#------------------------------ Udp input -------------------------------- +# Experimental: Config options for the udp input +#- type: udp + #enabled: false + + # Maximum size of the message received over UDP + #max_message_size: 10KiB + +#------------------------------ TCP input -------------------------------- +# Experimental: Config options for the TCP input +#- type: tcp + #enabled: false + + # The host and port to receive the new event + #host: "localhost:9000" + + # Character used to split new message + #line_delimiter: "\n" + + # Maximum size in bytes of the message received over TCP + #max_message_size: 20MiB + + # The number of seconds of inactivity before a remote connection is closed. + #timeout: 300s + + # Use SSL settings for TCP. + #ssl.enabled: true + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for client verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL server authentication. + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Server Certificate Key, + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections. + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites. + #ssl.curve_types: [] + + # Configure what types of client authentication are supported. Valid options + # are `none`, `optional`, and `required`. Default is required. + #ssl.client_authentication: "required" + +#------------------------------ Syslog input -------------------------------- +# Experimental: Config options for the Syslog input +# Accept RFC3164 formatted syslog event via UDP. +#- type: syslog + #enabled: false + #protocol.udp: + # The host and port to receive the new event + #host: "localhost:9000" + + # Maximum size of the message received over UDP + #max_message_size: 10KiB + +# Accept RFC3164 formatted syslog event via TCP. +#- type: syslog + #enabled: false + + #protocol.tcp: + # The host and port to receive the new event + #host: "localhost:9000" + + # Character used to split new message + #line_delimiter: "\n" + + # Maximum size in bytes of the message received over TCP + #max_message_size: 20MiB + + # The number of seconds of inactivity before a remote connection is closed. + #timeout: 300s + + # Use SSL settings for TCP. + #ssl.enabled: true + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for client verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL server authentication. + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Server Certificate Key, + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections. + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites. + #ssl.curve_types: [] + + # Configure what types of client authentication are supported. Valid options + # are `none`, `optional`, and `required`. Default is required. 
+ #ssl.client_authentication: "required" + +#------------------------------ Docker input -------------------------------- +# Experimental: Docker input reads and parses `json-file` logs from Docker +#- type: docker + #enabled: false + + # Combine partial lines flagged by `json-file` format + #combine_partials: true + + # Use this to read from all containers, replace * with a container id to read from one: + #containers: + # stream: all # can be all, stdout or stderr + # ids: + # - '*' + +#========================== Filebeat autodiscover ============================== + +# Autodiscover allows you to detect changes in the system and spawn new modules +# or inputs as they happen. + +#filebeat.autodiscover: + # List of enabled autodiscover providers +# providers: +# - type: docker +# templates: +# - condition: +# equals.docker.container.image: busybox +# config: +# - type: log +# paths: +# - /var/lib/docker/containers/${data.docker.container.id}/*.log + +#========================= Filebeat global options ============================ + +# Name of the registry file. If a relative path is used, it is considered relative to the +# data path. +#filebeat.registry_file: ${path.data}/registry + +# The permissions mask to apply on registry file. The default value is 0600. +# Must be a valid Unix-style file permissions mask expressed in octal notation. +# This option is not supported on Windows. +#filebeat.registry_file_permissions: 0600 + +# The timeout value that controls when registry entries are written to disk +# (flushed). When an unwritten update exceeds this value, it triggers a write to +# disk. When registry_flush is set to 0s, the registry is written to disk after +# each batch of events has been published successfully. The default value is 0s. +#filebeat.registry_flush: 0s + +# By default Ingest pipelines are not updated if a pipeline with the same ID +# already exists. If this option is enabled Filebeat overwrites pipelines +# every time a new Elasticsearch connection is established. +#filebeat.overwrite_pipelines: false + +# How long filebeat waits on shutdown for the publisher to finish. +# Default is 0, not waiting. +#filebeat.shutdown_timeout: 0 + +# Enable filebeat config reloading +#filebeat.config: + #inputs: + #enabled: false + #path: inputs.d/*.yml + #reload.enabled: true + #reload.period: 10s + #modules: + #enabled: false + #path: modules.d/*.yml + #reload.enabled: true + #reload.period: 10s + +#================================ General ====================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue configuration for buffering events to be published. 
+#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. + # A value of 0 ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Processors =================================== + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. 
This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, +# decode_json_fields, and add_cloud_metadata. +# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +#- include_fields: +# fields: ["cpu"] +#- drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +#- drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# +# The following example tokenizes the string into fields: +# +#processors: +#- dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +#- add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. +# +#processors: +#- add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +#- add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with docker metadata, it matches +# container id from log path available in `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). +# +#processors: +#- add_docker_metadata: ~ +# +# The following example enriches each event with host metadata. +# +#processors: +#- add_host_metadata: +# netinfo.enabled: false +# +# The following example enriches each event with process metadata using +# process IDs included in the event. +# +#processors: +#- add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent +# +# The following example decodes fields containing JSON strings +# and replaces the strings with valid JSON objects. +# +#processors: +#- decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false + +#============================= Elastic Cloud ================================== + +# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `<user>:<pass>`. 
+#cloud.auth: + +#================================ Outputs ====================================== + +# Configure what output to use when sending the data collected by the beat. + +#-------------------------- Elasticsearch output ------------------------------- +output.elasticsearch: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + hosts: ["elasticsearch77:9200"] + + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + #ilm.rollover_alias: "filebeat" + #ilm.pattern: "{now/d}-000001" + + # Set gzip compression level. + #compression_level: 0 + + # Configure escaping HTML symbols in strings. + #escape_html: true + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the URL with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "filebeat" plus date + # and generates [filebeat-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + index: "filebeat-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. + #pipeline: "" + + # Optional HTTP path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server URL + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure HTTP request timeout before failing a request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL-based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to + # 1.2 are enabled. 
+ #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + +#----------------------------- Logstash output --------------------------------- +#output.logstash: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Configure escaping HTML symbols in strings. + #escape_html: true + + # Optional maximum time to live for a connection to Logstash, after which the + # connection will be re-established. A value of `0s` (the default) will + # disable this feature. + # + # Not yet supported for async connections (i.e. with the "pipelining" option set) + #ttl: 30s + + # Optionally load-balance events between Logstash hosts. Default is false. + #loadbalance: false + + # Number of batches to be sent asynchronously to Logstash while processing + # new batches. + #pipelining: 2 + + # If enabled only a subset of events in a batch of events is transferred per + # transaction. The number of events to be sent increases up to `bulk_max_size` + # if no error is encountered. + #slow_start: false + + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + + # Optional index name. The default index name is set to filebeat + # in all lowercase. + #index: 'filebeat' + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. 
+ # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting + # and retry until all events are published. Set max_retries to a value less + # than 0 to retry until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Logstash request. The + # default is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Logstash server before + # timing out. The default is 30s. + #timeout: 30s + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from which to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create a unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version filebeat is assumed to run against. Defaults to the "1.0.0". + #version: '1.0.0' + + # Configure JSON encoding + #codec.json: + # Pretty-print JSON event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: true + + # Metadata update configuration. Metadata contains leader information + # used to decide which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Wait time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. 
+ # After the specified number of retries, events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # Set the compression level. Currently only gzip provides a compression level + # between 0 and 9. The default value is chosen by the compression algorithm. + #compression_level: 4 + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping HTML symbols in strings. 
+ #escape_html: true + + # The list of Redis servers to connect to. If load-balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The name of the Redis list or channel the events are published to. The + # default is filebeat. + #key: filebeat + + # The password to authenticate to Redis with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The number of seconds to wait before trying to reconnect to Redis + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Redis after a network error. The default is 60s. + #backoff.max: 60s + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. 
SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty-print JSON event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/filebeat" + + # Name of the generated files. The default is `filebeat` and it generates + # files: `filebeat`, `filebeat.1`, `filebeat.2`, etc. + #filename: filebeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every filebeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty-print JSON event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: true + +#================================= Paths ====================================== + +# The home path for the filebeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the filebeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the filebeat installation. This is the default base path +# for all the files in which filebeat needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a filebeat installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. 
+#path.logs: ${path.home}/logs + +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. +#keystore.path: "${path.config}/beats.keystore" + +#============================== Dashboards ===================================== +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards are disabled by default and can be enabled either by setting the +# options here, or by using the `-setup` CLI flag or the `setup` command. +#setup.dashboards.enabled: false + +# The directory from where to read the dashboards. The default is the `kibana` +# folder in the home path. +#setup.dashboards.directory: ${path.home}/kibana + +# The URL from where to download the dashboards archive. It is used instead of +# the directory if it has a value. +#setup.dashboards.url: + +# The file archive (zip file) from where to read the dashboards. It is used instead +# of the directory when it has a value. +#setup.dashboards.file: + +# In case the archive contains the dashboards from multiple Beats, this lets you +# select which one to load. You can load all the dashboards in the archive by +# setting this to the empty string. +#setup.dashboards.beat: filebeat + +# The name of the Kibana index to use for setting the configuration. Default is ".kibana" +#setup.dashboards.kibana_index: .kibana + +# The Elasticsearch index name. This overwrites the index name defined in the +# dashboards and index pattern. Example: testbeat-* +#setup.dashboards.index: + +# Always use the Kibana API for loading the dashboards instead of autodetecting +# how to install the dashboards by first querying Elasticsearch. +#setup.dashboards.always_kibana: false + +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. +#setup.dashboards.retry.maximum: 0 + + +#============================== Template ===================================== + +# A template is used to set the mapping in Elasticsearch +# By default template loading is enabled and the template is loaded. +# These settings can be adjusted to load your own template or overwrite existing ones. + +# Set to false to disable template loading. +#setup.template.enabled: true + +# Template name. By default the template name is "filebeat-%{[beat.version]}" +# The template name and pattern has to be set in case the Elasticsearch index pattern is modified. +setup.template.name: "filebeat" + +# Template pattern. By default the template pattern is "-%{[beat.version]}-*" to apply to the default index settings. +# The first part is the version of the beat and then -* is used to match all daily indices. +# The template name and pattern has to be set in case the Elasticsearch index pattern is modified. +setup.template.pattern: "filebeat-*" + +# Path to fields.yml file to generate the template +#setup.template.fields: "${path.config}/fields.yml" + +# A list of fields to be added to the template and Kibana index pattern. Also +# specify setup.template.overwrite: true to overwrite the existing template. +# This setting is experimental. +#setup.template.append_fields: +#- name: field_name +# type: field_type + +# Enable JSON template loading. 
If this is enabled, the fields.yml is ignored. +#setup.template.json.enabled: false + +# Path to the JSON template file +#setup.template.json.path: "${path.config}/template.json" + +# Name under which the template is stored in Elasticsearch +#setup.template.json.name: "" + +# Overwrite existing template +#setup.template.overwrite: false + +# Elasticsearch template settings +setup.template.settings: + + # A dictionary of settings to place into the settings.index dictionary + # of the Elasticsearch template. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html + #index: + #number_of_shards: 1 + #codec: best_compression + #number_of_routing_shards: 30 + + # A dictionary of settings for the _source field. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html + #_source: + #enabled: false + +#============================== Kibana ===================================== + +# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. +# This requires a Kibana endpoint configuration. +setup.kibana: + + # Kibana Host + # Scheme and port can be left out and will be set to the default (http and 5601) + # In case you specify and additional path, the scheme is required: http://localhost:5601/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 + #host: "localhost:5601" + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Optional HTTP path + #path: "" + + # Use SSL settings for HTTPS. Default is true. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. The default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + + +#================================ Logging ====================================== +# There are four options for the log output: file, stderr, syslog, eventlog +# The file output is the default. + +# Sets log level. The default log level is info. +# Available log levels are: error, warning, info, debug +#logging.level: info + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are "beat", "publish", "service" +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: false + +# Send all logging output to Windows Event Logs. The default is false. +#logging.to_eventlog: false + +# If enabled, filebeat periodically logs its internal metrics that have changed +# in the last period. 
For each metric that changed, the delta from the value at +# the beginning of the period is logged. Also, the total values for +# all non-zero internal metrics are logged on shutdown. The default is true. +#logging.metrics.enabled: true + +# The period after which to log the internal metrics. The default is 30s. +#logging.metrics.period: 30s + +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/filebeat + + # The name of the files where the logs are written to. + #name: filebeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + +# Set to true to log messages in JSON format. +#logging.json: false + + +#============================== Xpack Monitoring ===================================== +# filebeat can export internal metrics to a central Elasticsearch monitoring cluster. +# This requires xpack monitoring to be enabled in Elasticsearch. +# The reporting is disabled by default. + +# Set to true to enable the monitoring reporter. +#xpack.monitoring.enabled: false + +# Uncomment to send the metrics to Elasticsearch. Most settings from the +# Elasticsearch output are accepted here as well. Any setting that is not set is +# automatically inherited from the Elasticsearch output configuration, so if you +# have the Elasticsearch output configured, you can simply uncomment the +# following line, and leave the rest commented out. +#xpack.monitoring.elasticsearch: + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "beats_system" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the URL with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. 
+ #bulk_max_size: 50 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure HTTP request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. The default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + #metrics.period: 10s + #state.period: 1m + +#================================ HTTP Endpoint ====================================== +# Each beat can expose internal metrics through a HTTP endpoint. For security +# reasons the endpoint is disabled by default. This feature is currently experimental. +# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# append ?pretty to the URL. + +# Defines if the HTTP endpoint is enabled. +#http.enabled: false + +# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +#http.host: localhost + +# Port on which the HTTP endpoint will bind. Default is 5066. +#http.port: 5066 + +#============================= Process Security ================================ + +# Enable or disable seccomp system call filtering on Linux. Default is enabled. 
+#seccomp.enabled: true
diff --git a/devenv/docker/blocks/elastic77/metricbeat.yml b/devenv/docker/blocks/elastic77/metricbeat.yml
new file mode 100644
index 00000000000..0eaa0ff42f8
--- /dev/null
+++ b/devenv/docker/blocks/elastic77/metricbeat.yml
@@ -0,0 +1,38 @@
+metricbeat.config:
+  modules:
+    path: ${path.config}/modules.d/*.yml
+    # Reload module configs as they change:
+    reload.enabled: false
+
+metricbeat.autodiscover:
+  providers:
+    - type: docker
+      hints.enabled: true
+
+metricbeat.modules:
+- module: docker
+  metricsets:
+    - "container"
+    - "cpu"
+    - "diskio"
+    - "healthcheck"
+    - "info"
+    #- "image"
+    - "memory"
+    - "network"
+  hosts: ["unix:///var/run/docker.sock"]
+  period: 10s
+  enabled: true
+
+processors:
+  - add_cloud_metadata: ~
+
+output.elasticsearch:
+  hosts: ["elasticsearch77:9200"]
+  index: "metricbeat-%{+yyyy.MM.dd}"
+
+setup.template.name: "metricbeat"
+setup.template.pattern: "metricbeat-*"
+setup.template.settings:
+  index.number_of_shards: 1
+  index.number_of_replicas: 1
diff --git a/docs/sources/datasources/elasticsearch.md b/docs/sources/datasources/elasticsearch.md
index 87f2a1aa4ab..0f6c8030b95 100644
--- a/docs/sources/datasources/elasticsearch.md
+++ b/docs/sources/datasources/elasticsearch.md
@@ -57,7 +57,7 @@ a time pattern for the index name or a wildcard.
 ### Elasticsearch version
 
 Select the version of your Elasticsearch data source from the version selection dropdown. Different query compositions and functionalities are available in the query editor for different versions.
-Available Elasticsearch versions are `2.x`, `5.x`, `5.6+`, `6.0+`, and `7.0+`. Select the option that best matches your data source version.
+Available Elasticsearch versions are `2.x`, `5.x`, `5.6+`, `6.0+`, `7.0+`, and `7.7+`. Select the option that best matches your data source version.
 
 Grafana assumes that you are running the lowest possible version for a specified range. This ensures that new features or breaking changes in a future Elasticsearch release will not affect your configuration.
 
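For reviewers unfamiliar with the X-Pack `top_metrics` aggregation, the Go hunks that follow serialize the editor settings into the aggregation body sent to Elasticsearch. Below is a minimal, self-contained sketch of that translation; the `marshalTopMetrics` helper is invented for illustration and is not part of this patch, it simply mirrors the `top_metrics` branch added to `MetricAggregation.MarshalJSON` in pkg/tsdb/elasticsearch/client/models.go.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// marshalTopMetrics mirrors the top_metrics branch of MarshalJSON below:
// each selected field becomes a {"field": ...} entry under "metrics",
// "order"/"orderBy" collapse into a single-element "sort" array, and
// "size" is pinned to "1".
func marshalTopMetrics(settings map[string]interface{}) ([]byte, error) {
	root := map[string]interface{}{"size": "1"}

	if metrics, ok := settings["metrics"].([]interface{}); ok {
		var rootMetrics []map[string]string
		for _, v := range metrics {
			rootMetrics = append(rootMetrics, map[string]string{"field": v.(string)})
		}
		root["metrics"] = rootMetrics
	}

	order, hasOrder := settings["order"]
	orderBy, hasOrderBy := settings["orderBy"]
	if hasOrder && hasOrderBy {
		root["sort"] = []map[string]interface{}{{orderBy.(string): order}}
	}

	return json.Marshal(root)
}

func main() {
	body, err := marshalTopMetrics(map[string]interface{}{
		"order":   "desc",
		"orderBy": "@timestamp",
		"metrics": []interface{}{"@value", "@anotherValue"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
	// {"metrics":[{"field":"@value"},{"field":"@anotherValue"}],"size":"1","sort":[{"@timestamp":"desc"}]}
}
```

Like the hunk it mirrors, the sketch pins size to "1", so only the top-sorted document per bucket is requested; this matches the single value per bucket that processTopMetrics later extracts in pkg/tsdb/elasticsearch/response_parser.go.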
diff --git a/pkg/tsdb/elasticsearch/client/client.go b/pkg/tsdb/elasticsearch/client/client.go index d63f17eeb1f..7798e0034ca 100644 --- a/pkg/tsdb/elasticsearch/client/client.go +++ b/pkg/tsdb/elasticsearch/client/client.go @@ -53,6 +53,8 @@ func coerceVersion(v *simplejson.Json) (*semver.Version, error) { return nil, err } + // Legacy version numbers (before Grafana 8) + // valid values were 2,5,56,60,70 switch versionNumber { case 2: return semver.NewVersion("2.0.0") diff --git a/pkg/tsdb/elasticsearch/client/models.go b/pkg/tsdb/elasticsearch/client/models.go index 097a5a904b3..f8994b18c8e 100644 --- a/pkg/tsdb/elasticsearch/client/models.go +++ b/pkg/tsdb/elasticsearch/client/models.go @@ -274,12 +274,41 @@ type GeoHashGridAggregation struct { // MetricAggregation represents a metric aggregation type MetricAggregation struct { + Type string Field string Settings map[string]interface{} } // MarshalJSON returns the JSON encoding of the metric aggregation func (a *MetricAggregation) MarshalJSON() ([]byte, error) { + if a.Type == "top_metrics" { + root := map[string]interface{}{} + var rootMetrics []map[string]string + + order, hasOrder := a.Settings["order"] + orderBy, hasOrderBy := a.Settings["orderBy"] + + root["size"] = "1" + + metrics, hasMetrics := a.Settings["metrics"].([]interface{}) + if hasMetrics { + for _, v := range metrics { + metricValue := map[string]string{"field": v.(string)} + rootMetrics = append(rootMetrics, metricValue) + } + root["metrics"] = rootMetrics + } + + if hasOrderBy && hasOrder { + root["sort"] = []map[string]interface{}{ + { + orderBy.(string): order, + }, + } + } + + return json.Marshal(root) + } root := map[string]interface{}{} if a.Field != "" { diff --git a/pkg/tsdb/elasticsearch/client/search_request.go b/pkg/tsdb/elasticsearch/client/search_request.go index 2c02de113b7..1f5d71c020e 100644 --- a/pkg/tsdb/elasticsearch/client/search_request.go +++ b/pkg/tsdb/elasticsearch/client/search_request.go @@ -419,9 +419,11 @@ func (b *aggBuilderImpl) GeoHashGrid(key, field string, fn func(a *GeoHashGridAg func (b *aggBuilderImpl) Metric(key, metricType, field string, fn func(a *MetricAggregation)) AggBuilder { innerAgg := &MetricAggregation{ + Type: metricType, Field: field, Settings: make(map[string]interface{}), } + aggDef := newAggDef(key, &aggContainer{ Type: metricType, Aggregation: innerAgg, diff --git a/pkg/tsdb/elasticsearch/models.go b/pkg/tsdb/elasticsearch/models.go index 33abb6d9586..cd15c34236c 100644 --- a/pkg/tsdb/elasticsearch/models.go +++ b/pkg/tsdb/elasticsearch/models.go @@ -43,6 +43,7 @@ var metricAggType = map[string]string{ "min": "Min", "extended_stats": "Extended Stats", "percentiles": "Percentiles", + "top_metrics": "Top Metrics", "cardinality": "Unique Count", "moving_avg": "Moving Average", "moving_fn": "Moving Function", diff --git a/pkg/tsdb/elasticsearch/response_parser.go b/pkg/tsdb/elasticsearch/response_parser.go index 4c2fc6663bd..a90d29270db 100644 --- a/pkg/tsdb/elasticsearch/response_parser.go +++ b/pkg/tsdb/elasticsearch/response_parser.go @@ -18,6 +18,7 @@ const ( countType = "count" percentilesType = "percentiles" extendedStatsType = "extended_stats" + topMetricsType = "top_metrics" // Bucket types dateHistType = "date_histogram" histogramType = "histogram" @@ -225,6 +226,10 @@ func (rp *responseParser) processMetrics(esAgg *simplejson.Json, target *Query, } *series = append(*series, newSeries) } + case topMetricsType: + topMetricSeries := processTopMetrics(metric, esAgg, props) + *series = append(*series, 
topMetricSeries...) + case extendedStatsType: buckets := esAgg.Get("buckets").MustArray() @@ -592,3 +597,47 @@ func getErrorFromElasticResponse(response *es.SearchResponse) plugins.DataQueryR return result } + +func processTopMetricValues(stats *simplejson.Json, field string) null.Float { + for _, stat := range stats.MustArray() { + stat := stat.(map[string]interface{}) + metrics, hasMetrics := stat["metrics"] + if hasMetrics { + metrics := metrics.(map[string]interface{}) + metricValue, hasMetricValue := metrics[field] + if hasMetricValue && metricValue != nil { + return null.FloatFrom(metricValue.(float64)) + } + } + } + return null.NewFloat(0, false) +} + +func processTopMetrics(metric *MetricAgg, esAgg *simplejson.Json, props map[string]string) plugins.DataTimeSeriesSlice { + var series plugins.DataTimeSeriesSlice + metrics, hasMetrics := metric.Settings.MustMap()["metrics"].([]interface{}) + + if hasMetrics { + for _, metricField := range metrics { + newSeries := plugins.DataTimeSeries{ + Tags: make(map[string]string), + } + + for _, v := range esAgg.Get("buckets").MustArray() { + bucket := simplejson.NewFromAny(v) + stats := bucket.GetPath(metric.ID, "top") + value := processTopMetricValues(stats, metricField.(string)) + key := castToNullFloat(bucket.Get("key")) + newSeries.Points = append(newSeries.Points, plugins.DataTimePoint{value, key}) + } + + for k, v := range props { + newSeries.Tags[k] = v + } + newSeries.Tags["metric"] = "top_metrics" + newSeries.Tags["field"] = metricField.(string) + series = append(series, newSeries) + } + } + return series +} diff --git a/pkg/tsdb/elasticsearch/response_parser_test.go b/pkg/tsdb/elasticsearch/response_parser_test.go index 0ee1c089910..5ee32ff8ff7 100644 --- a/pkg/tsdb/elasticsearch/response_parser_test.go +++ b/pkg/tsdb/elasticsearch/response_parser_test.go @@ -10,6 +10,7 @@ import ( "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/plugins" es "github.com/grafana/grafana/pkg/tsdb/elasticsearch/client" + "github.com/stretchr/testify/assert" . 
"github.com/smartystreets/goconvey/convey" ) @@ -992,6 +993,80 @@ func TestResponseParser(t *testing.T) { // So(rows[0][2].(null.Float).Float64, ShouldEqual, 3000) // }) }) + + t.Run("With top_metrics", func(t *testing.T) { + targets := map[string]string{ + "A": `{ + "timeField": "@timestamp", + "metrics": [ + { + "type": "top_metrics", + "settings": { + "order": "desc", + "orderBy": "@timestamp", + "metrics": ["@value", "@anotherValue"] + }, + "id": "1" + } + ], + "bucketAggs": [{ "type": "date_histogram", "field": "@timestamp", "id": "3" }] + }`, + } + response := `{ + "responses": [{ + "aggregations": { + "3": { + "buckets": [ + { + "key": 1609459200000, + "key_as_string": "2021-01-01T00:00:00.000Z", + "1": { + "top": [ + { "sort": ["2021-01-01T00:00:00.000Z"], "metrics": { "@value": 1, "@anotherValue": 2 } } + ] + } + }, + { + "key": 1609459210000, + "key_as_string": "2021-01-01T00:00:10.000Z", + "1": { + "top": [ + { "sort": ["2021-01-01T00:00:10.000Z"], "metrics": { "@value": 1, "@anotherValue": 2 } } + ] + } + } + ] + } + } + }] + }` + rp, err := newResponseParserForTest(targets, response) + assert.Nil(t, err) + result, err := rp.getTimeSeries() + assert.Nil(t, err) + assert.Len(t, result.Results, 1) + + queryRes := result.Results["A"] + assert.NotNil(t, queryRes) + assert.Len(t, queryRes.Series, 2) + + seriesOne := queryRes.Series[0] + assert.Equal(t, seriesOne.Name, "Top Metrics @value") + assert.Len(t, seriesOne.Points, 2) + assert.Equal(t, seriesOne.Points[0][0].Float64, 1.) + assert.Equal(t, seriesOne.Points[0][1].Float64, 1609459200000.) + assert.Equal(t, seriesOne.Points[1][0].Float64, 1.) + assert.Equal(t, seriesOne.Points[1][1].Float64, 1609459210000.) + + seriesTwo := queryRes.Series[1] + assert.Equal(t, seriesTwo.Name, "Top Metrics @anotherValue") + assert.Len(t, seriesTwo.Points, 2) + + assert.Equal(t, seriesTwo.Points[0][0].Float64, 2.) + assert.Equal(t, seriesTwo.Points[0][1].Float64, 1609459200000.) + assert.Equal(t, seriesTwo.Points[1][0].Float64, 2.) + assert.Equal(t, seriesTwo.Points[1][1].Float64, 1609459210000.) 
+ }) } func newResponseParserForTest(tsdbQueries map[string]string, responseBody string) (*responseParser, error) { diff --git a/public/app/plugins/datasource/elasticsearch/components/QueryEditor/BucketAggregationsEditor/aggregations.ts b/public/app/plugins/datasource/elasticsearch/components/QueryEditor/BucketAggregationsEditor/aggregations.ts index 36f8e592c3c..b0154b34305 100644 --- a/public/app/plugins/datasource/elasticsearch/components/QueryEditor/BucketAggregationsEditor/aggregations.ts +++ b/public/app/plugins/datasource/elasticsearch/components/QueryEditor/BucketAggregationsEditor/aggregations.ts @@ -66,3 +66,14 @@ export type BucketAggregation = DateHistogram | Histogram | Terms | Filters | Ge export const isBucketAggregationWithField = ( bucketAgg: BucketAggregation | BucketAggregationWithField ): bucketAgg is BucketAggregationWithField => bucketAggregationConfig[bucketAgg.type].requiresField; + +export const BUCKET_AGGREGATION_TYPES: BucketAggregationType[] = [ + 'date_histogram', + 'histogram', + 'terms', + 'filters', + 'geohash_grid', +]; + +export const isBucketAggregationType = (s: BucketAggregationType | string): s is BucketAggregationType => + BUCKET_AGGREGATION_TYPES.includes(s as BucketAggregationType); diff --git a/public/app/plugins/datasource/elasticsearch/components/QueryEditor/BucketAggregationsEditor/utils.ts b/public/app/plugins/datasource/elasticsearch/components/QueryEditor/BucketAggregationsEditor/utils.ts index 45541c7e63c..dcd160d6859 100644 --- a/public/app/plugins/datasource/elasticsearch/components/QueryEditor/BucketAggregationsEditor/utils.ts +++ b/public/app/plugins/datasource/elasticsearch/components/QueryEditor/BucketAggregationsEditor/utils.ts @@ -126,18 +126,22 @@ function createOrderByOptionsForPercentiles(metric: Percentiles): OrderByOption[ }); } +const INCOMPATIBLE_ORDER_BY_AGGS = ['top_metrics']; + /** * This creates all the valid order by options based on the metrics */ export const createOrderByOptionsFromMetrics = (metrics: MetricAggregation[] = []): OrderByOption[] => { - const metricOptions = metrics.flatMap((metric) => { - if (metric.type === 'extended_stats') { - return createOrderByOptionsForExtendedStats(metric); - } else if (metric.type === 'percentiles') { - return createOrderByOptionsForPercentiles(metric); - } else { - return { label: describeMetric(metric), value: metric.id }; - } - }); + const metricOptions = metrics + .filter((metric) => !INCOMPATIBLE_ORDER_BY_AGGS.includes(metric.type)) + .flatMap((metric) => { + if (metric.type === 'extended_stats') { + return createOrderByOptionsForExtendedStats(metric); + } else if (metric.type === 'percentiles') { + return createOrderByOptionsForPercentiles(metric); + } else { + return { label: describeMetric(metric), value: metric.id }; + } + }); return [...orderByOptions, ...metricOptions]; }; diff --git a/public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/MetricEditor.test.tsx b/public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/MetricEditor.test.tsx index 71bdbef2391..efa666958ef 100644 --- a/public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/MetricEditor.test.tsx +++ b/public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/MetricEditor.test.tsx @@ -1,12 +1,12 @@ import { act, fireEvent, render, screen } from '@testing-library/react'; import { ElasticsearchProvider } from '../ElasticsearchQueryContext'; import { MetricEditor } 
from './MetricEditor'; -import React, { PropsWithChildren } from 'react'; +import React, { ReactNode, PropsWithChildren } from 'react'; import { ElasticDatasource } from '../../../datasource'; import { getDefaultTimeRange } from '@grafana/data'; import { ElasticsearchQuery } from '../../../types'; import { Average, UniqueCount } from './aggregations'; -import { defaultBucketAgg } from '../../../query_def'; +import { defaultBucketAgg, defaultMetricAgg } from '../../../query_def'; import { from } from 'rxjs'; describe('Metric Editor', () => { @@ -82,4 +82,50 @@ describe('Metric Editor', () => { expect(await screen.findByText('No options found')).toBeInTheDocument(); expect(screen.queryByText('None')).not.toBeInTheDocument(); }); + + describe('Top Metrics Aggregation', () => { + const setupTopMetricsScreen = (esVersion: string, xpack: boolean) => { + const query: ElasticsearchQuery = { + refId: 'A', + query: '', + metrics: [defaultMetricAgg('1')], + bucketAggs: [defaultBucketAgg('2')], + }; + + const getFields: ElasticDatasource['getFields'] = jest.fn(() => from([[]])); + + const wrapper = ({ children }: { children: ReactNode }) => ( + {}} + onRunQuery={() => {}} + > + {children} + + ); + + render(, { wrapper }); + + act(() => { + fireEvent.click(screen.getByText('Count')); + }); + }; + + it('Should include top metrics aggregation when esVersion is 77 and X-Pack is enabled', () => { + setupTopMetricsScreen('7.7.0', true); + expect(screen.getByText('Top Metrics')).toBeInTheDocument(); + }); + + it('Should NOT include top metrics aggregation where esVersion is 77 and X-Pack is disabled', () => { + setupTopMetricsScreen('7.7.0', false); + expect(screen.queryByText('Top Metrics')).toBe(null); + }); + + it('Should NOT include top metrics aggregation when esVersion is 70 and X-Pack is enabled', () => { + setupTopMetricsScreen('7.0.0', true); + expect(screen.queryByText('Top Metrics')).toBe(null); + }); + }); }); diff --git a/public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/MetricEditor.tsx b/public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/MetricEditor.tsx index 38304ef1179..eeace2f9b09 100644 --- a/public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/MetricEditor.tsx +++ b/public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/MetricEditor.tsx @@ -40,7 +40,8 @@ const isBasicAggregation = (metric: MetricAggregation) => !metricAggregationConf const getTypeOptions = ( previousMetrics: MetricAggregation[], - esVersion: string + esVersion: string, + xpack = false ): Array> => { // we'll include Pipeline Aggregations only if at least one previous metric is a "Basic" one const includePipelineAggregations = previousMetrics.some(isBasicAggregation); @@ -51,6 +52,8 @@ const getTypeOptions = ( .filter(([_, { versionRange = '*' }]) => satisfies(esVersion, versionRange)) // Filtering out Pipeline Aggregations if there's no basic metric selected before .filter(([_, config]) => includePipelineAggregations || !config.isPipelineAgg) + // Filtering out X-Pack plugins if X-Pack is disabled + .filter(([_, config]) => (config.xpack ? 
xpack : true)) .map(([key, { label }]) => ({ label, value: key as MetricAggregationType, @@ -86,7 +89,7 @@ export const MetricEditor = ({ value }: Props) => { dispatch(changeMetricType(value.id, e.value!))} value={toOption(value)} /> diff --git a/public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/SettingsEditor/TopMetricsSettingsEditor.tsx b/public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/SettingsEditor/TopMetricsSettingsEditor.tsx new file mode 100644 index 00000000000..b9c679e1aaa --- /dev/null +++ b/public/app/plugins/datasource/elasticsearch/components/QueryEditor/MetricAggregationsEditor/SettingsEditor/TopMetricsSettingsEditor.tsx @@ -0,0 +1,69 @@ +import { AsyncMultiSelect, InlineField, SegmentAsync, Select } from '@grafana/ui'; +import React, { FunctionComponent } from 'react'; +import { useDispatch } from '../../../../hooks/useStatelessReducer'; +import { useFields } from '../../../../hooks/useFields'; +import { TopMetrics } from '../aggregations'; +import { changeMetricSetting } from '../state/actions'; +import { orderOptions } from '../../BucketAggregationsEditor/utils'; +import { css } from '@emotion/css'; +import { SelectableValue } from '@grafana/data'; + +interface Props { + metric: TopMetrics; +} + +const toMultiSelectValue = (value: string): SelectableValue => ({ value, label: value }); + +export const TopMetricsSettingsEditor: FunctionComponent = ({ metric }) => { + const dispatch = useDispatch(); + const getOrderByOptions = useFields(['number', 'date']); + const getMetricsOptions = useFields(metric.type); + + return ( + <> + + + dispatch( + changeMetricSetting( + metric, + 'metrics', + e.map((v) => v.value!) + ) + ) + } + loadOptions={getMetricsOptions} + value={metric.settings?.metrics?.map(toMultiSelectValue)} + closeMenuOnSelect={false} + defaultOptions + /> + + +