- Modified sample conf so it is usable by default, also added some
comments from the 'hello world' configuration.
- Updated README so there's a clear two-step start for newbies.
- Added extra vim swap files to gitignore.
Change-Id: I76203973db4a7b332014662fcfb2ce5e7d137bd8
changes/48/48/2
parent
986adfa557
commit
2d2c434d48
@ -1,26 +1,30 @@ |
||||
# Global default settings. |
||||
global { |
||||
scrape_interval: "1s" |
||||
evaluation_interval: "1s" |
||||
labels { |
||||
label { |
||||
name: "owner" |
||||
value: "test" |
||||
scrape_interval: "15s" # By default, scrape targets every 15 seconds. |
||||
evaluation_interval: "15s" # By default, evaluate rules every 15 seconds. |
||||
|
||||
# Attach these extra labels to all timeseries collected by this Prometheus instance. |
||||
labels: { |
||||
label: { |
||||
name: "monitor" |
||||
value: "codelab-monitor" |
||||
} |
||||
} |
||||
rule_file: "prometheus.rules" |
||||
|
||||
# Load and evaluate rules in this file every 'evaluation_interval' seconds. This field may be repeated. |
||||
#rule_file: "prometheus.rules" |
||||
} |
||||
|
||||
job { |
||||
# A job definition containing exactly one endpoint to scrape: Here it's prometheus itself. |
||||
job: { |
||||
# The job name is added as a label `job={job-name}` to any timeseries scraped from this job. |
||||
name: "prometheus" |
||||
# Override the global default and scrape targets from this job every 5 seconds. |
||||
scrape_interval: "5s" |
||||
|
||||
target_group { |
||||
# Let's define a group of targets to scrape for this job. In this case, only one. |
||||
target_group: { |
||||
# These endpoints are scraped via HTTP. |
||||
target: "http://localhost:9090/metrics.json" |
||||
labels { |
||||
label { |
||||
name: "group" |
||||
value: "canary" |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
Loading…
Reference in new issue