diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 4e942f1f3b..7b835b36f8 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -12,7 +12,7 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index add72cc89c..e65c14442d 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest if: github.repository_owner == 'prometheus' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0 @@ -25,7 +25,7 @@ jobs: with: input: 'prompb' against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb' - - uses: bufbuild/buf-push-action@a654ff18effe4641ebea4a4ce242c49800728459 # v1.1.1 + - uses: bufbuild/buf-push-action@1c45f6a21ec277ee4c1fa2772e49b9541ea17f38 # v1.1.1 with: input: 'prompb' buf_token: ${{ secrets.BUF_TOKEN }} diff --git a/.github/workflows/check_release_notes.yml b/.github/workflows/check_release_notes.yml index b8381aff07..171af5f213 100644 --- a/.github/workflows/check_release_notes.yml +++ b/.github/workflows/check_release_notes.yml @@ -20,7 +20,7 @@ jobs: # Don't run it on dependabot PRs either as humans would take control in case a bump introduces a breaking change. if: (github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community') && github.event.pull_request.user.login != 'dependabot[bot]' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - env: PR_DESCRIPTION: ${{ github.event.pull_request.body }} run: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f0195f02d5..48611c1973 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,7 +16,7 @@ jobs: # should also be updated. image: quay.io/prometheus/golang-builder:1.25-base steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3 @@ -34,7 +34,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.25-base steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3 @@ -59,7 +59,7 @@ jobs: # The go version in this image should be N-1 wrt test_go. 
image: quay.io/prometheus/golang-builder:1.24-base steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - run: make build @@ -78,7 +78,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.25-base steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3 @@ -97,10 +97,10 @@ jobs: name: Go tests on Windows runs-on: windows-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version: 1.25.x - run: | @@ -116,7 +116,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.25-base steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - run: go install ./cmd/promtool/. @@ -143,7 +143,7 @@ jobs: matrix: thread: [ 0, 1, 2 ] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3 @@ -170,7 +170,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. 
steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3 @@ -208,7 +208,8 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.25-base steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Checkout repository + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3 @@ -222,11 +223,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - name: Install Go - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version: 1.25.x - name: Install snmp_exporter/generator dependencies @@ -236,18 +237,18 @@ jobs: id: golangci-lint-version run: echo "version=$(make print-golangci-lint-version)" >> $GITHUB_OUTPUT - name: Lint - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + uses: golangci/golangci-lint-action@0a35821d5c230e903fcfe077583637dea1b27b47 # v9.0.0 with: args: --verbose version: ${{ steps.golangci-lint-version.outputs.version }} - name: Lint with slicelabels - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + uses: golangci/golangci-lint-action@0a35821d5c230e903fcfe077583637dea1b27b47 # v9.0.0 with: # goexperiment.synctest to ensure we don't miss files that depend on it. 
args: --verbose --build-tags=slicelabels,goexperiment.synctest version: ${{ steps.golangci-lint-version.outputs.version }} - name: Lint with dedupelabels - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + uses: golangci/golangci-lint-action@0a35821d5c230e903fcfe077583637dea1b27b47 # v9.0.0 with: args: --verbose --build-tags=dedupelabels version: ${{ steps.golangci-lint-version.outputs.version }} @@ -266,7 +267,7 @@ jobs: needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all] if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3 @@ -285,7 +286,7 @@ jobs: || (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.')) steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3 @@ -302,7 +303,7 @@ jobs: needs: [test_ui, codeql] steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2e2143f4c8..8dfa6049f2 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -24,17 +24,17 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - name: Initialize CodeQL - uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 + uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 + uses: github/codeql-action/autobuild@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 + uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml index 7de8bb8da7..7b46e9532f 100644 --- a/.github/workflows/container_description.yml +++ b/.github/workflows/container_description.yml @@ -18,7 +18,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. steps: - name: git checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - name: Set docker hub repo name @@ -42,7 +42,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 
steps: - name: git checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - name: Set quay.io org name diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index 24702c2920..60f643b4f0 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -10,12 +10,12 @@ jobs: steps: - name: Build Fuzzers id: build - uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@537c8005ba4c9de026b2fa3550663280d25d6175 # master + uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@4bf20ff8dfda18ad651583ebca9fb17a7ce1940a # master with: oss-fuzz-project-name: "prometheus" dry-run: false - name: Run Fuzzers - uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@537c8005ba4c9de026b2fa3550663280d25d6175 # master + uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@4bf20ff8dfda18ad651583ebca9fb17a7ce1940a # master # Note: Regularly check for updates to the pinned commit hash at: # https://github.com/google/oss-fuzz/tree/master/infra/cifuzz/actions/run_fuzzers with: diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index fea1422fdc..afc589c6d7 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -14,7 +14,7 @@ jobs: container: image: quay.io/prometheus/golang-builder steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - run: ./scripts/sync_repo_files.sh diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 658e140f27..64a6365e48 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -21,7 +21,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1 with: persist-credentials: false @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # tag=v4.31.2 + uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 with: sarif_file: results.sarif diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 86deb94097..947e670fd8 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -11,7 +11,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 
runs-on: ubuntu-latest steps: - - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0 + - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1 with: repo-token: ${{ secrets.GITHUB_TOKEN }} # opt out of defaults to avoid marking issues as stale and closing them diff --git a/config/config.go b/config/config.go index 30c8a8ed21..113942b61a 100644 --- a/config/config.go +++ b/config/config.go @@ -1022,7 +1022,7 @@ func ToEscapingScheme(s string, v model.ValidationScheme) (model.EscapingScheme, case model.LegacyValidation: return model.UnderscoreEscaping, nil case model.UnsetValidation: - return model.NoEscaping, fmt.Errorf("v is unset: %s", v) + return model.NoEscaping, fmt.Errorf("ValidationScheme is unset: %s", v) default: panic(fmt.Errorf("unhandled validation scheme: %s", v)) } diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 0051859d66..74daa11c13 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -197,7 +197,12 @@ the offset calculation. `step()` can be used in duration expressions. For a **range query**, it resolves to the step width of the range query. -For an **instant query**, it resolves to `0s`. +For an **instant query**, it resolves to `0s`. + +`range()` can be used in duration expressions. +For a **range query**, it resolves to the full range of the query (end time - start time). +For an **instant query**, it resolves to `0s`. +This is particularly useful in combination with `@end()` to look back over the entire query range, e.g., `max_over_time(metric[range()] @ end())`. `min(, )` and `max(, )` can be used to find the minimum or maximum of two duration expressions. diff --git a/go.mod b/go.mod index 6ebb6c46fe..67761b4dc4 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/gophercloud/gophercloud/v2 v2.9.0 github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 github.com/hashicorp/consul/api v1.32.1 - github.com/hashicorp/nomad/api v0.0.0-20251216171439-1dee0671280e + github.com/hashicorp/nomad/api v0.0.0-20251222083347-1355d4cb1671 github.com/hetznercloud/hcloud-go/v2 v2.32.0 github.com/ionos-cloud/sdk-go/v6 v6.3.5 github.com/json-iterator/go v1.1.12 diff --git a/go.sum b/go.sum index b28b0eb3ff..6be018d24b 100644 --- a/go.sum +++ b/go.sum @@ -307,8 +307,8 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/ github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/memberlist v0.5.3 h1:tQ1jOCypD0WvMemw/ZhhtH+PWpzcftQvgCorLu0hndk= github.com/hashicorp/memberlist v0.5.3/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE= -github.com/hashicorp/nomad/api v0.0.0-20251216171439-1dee0671280e h1:wGl06iy/H90NSbWjfXWeRwk9SJOks0u4voIryeJFlSA= -github.com/hashicorp/nomad/api v0.0.0-20251216171439-1dee0671280e/go.mod h1:sldFTIgs+FsUeKU3LwVjviAIuksxD8TzDOn02MYwslE= +github.com/hashicorp/nomad/api v0.0.0-20251222083347-1355d4cb1671 h1:4NbynIRljuOUvAQNLLJA1yuWcoL5EC3Qn4c7HCngUds= +github.com/hashicorp/nomad/api v0.0.0-20251222083347-1355d4cb1671/go.mod h1:sldFTIgs+FsUeKU3LwVjviAIuksxD8TzDOn02MYwslE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hetznercloud/hcloud-go/v2 v2.32.0 h1:BRe+k7ESdYv3xQLBGdKUfk+XBFRJNGKzq70nJI24ciM= diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 2334ae7bd0..b31cd5bc3e 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ 
-67,8 +67,8 @@ require ( github.com/opencontainers/image-spec v1.1.1 // indirect github.com/petermattis/goid v0.0.0-20250904145737-900bdf8bb490 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/quic-go v0.56.0 // indirect + github.com/quic-go/qpack v0.6.0 // indirect + github.com/quic-go/quic-go v0.57.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rs/cors v1.11.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect diff --git a/internal/tools/go.sum b/internal/tools/go.sum index fb63670b60..7f1161148b 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -154,10 +154,10 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9 h1:arwj11zP0yJIxIRiDn22E0H8PxfF7TsTrc2wIPFIsf4= github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9/go.mod h1:SKZx6stCn03JN3BOWTwvVIO2ajMkb/zQdTceXYhKw/4= -github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= -github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.56.0 h1:q/TW+OLismmXAehgFLczhCDTYB3bFmua4D9lsNBWxvY= -github.com/quic-go/quic-go v0.56.0/go.mod h1:9gx5KsFQtw2oZ6GZTyh+7YEvOxWCL9WZAepnHxgAo6c= +github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= +github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= +github.com/quic-go/quic-go v0.57.0 h1:AsSSrrMs4qI/hLrKlTH/TGQeTMY0ib1pAOX7vA3AdqE= +github.com/quic-go/quic-go v0.57.0/go.mod h1:ly4QBAjHA2VhdnxhojRsCUOeJwKYg+taDlos92xb1+s= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rodaine/protogofakeit v0.1.1 h1:ZKouljuRM3A+TArppfBqnH8tGZHOwM/pjvtXe9DaXH8= diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 91fcac1cfb..0acf9cb28f 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -484,7 +484,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) (res *FloatHistogram, counte // supposed to be used according to the schema. func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { if h2 == nil { - return false + return h == nil } if h.Schema != h2.Schema || diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index 5fc68ef9d0..aa9f696be6 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -247,7 +247,7 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] { // supposed to be used according to the schema. func (h *Histogram) Equals(h2 *Histogram) bool { if h2 == nil { - return false + return h == nil } if h.Schema != h2.Schema || h.Count != h2.Count || diff --git a/promql/durations.go b/promql/durations.go index c882adfbb6..216dd02725 100644 --- a/promql/durations.go +++ b/promql/durations.go @@ -28,7 +28,8 @@ import ( // in OriginalOffsetExpr representing (1h / 2). This visitor evaluates // such duration expression, setting OriginalOffset to 30m. 
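The comment above describes the visitor's job end to end: a duration expression like `(1h / 2)` in an offset position is evaluated once, up front, and the resolved `30m` is stored on the selector. A minimal sketch of that behaviour through the exported entry point — assuming the `PreprocessExpr` signature shown in this diff, and assuming `parser.ExperimentalDurationExpr` is the gate for duration expressions:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Assumption: duration expressions sit behind this experimental gate.
	parser.ExperimentalDurationExpr = true

	expr, err := parser.ParseExpr(`some_metric offset (1h / 2)`)
	if err != nil {
		panic(err)
	}
	// PreprocessExpr walks the AST with a durationVisitor carrying the
	// query's step and, after this change, its range (end - start).
	now := time.Now()
	out, err := promql.PreprocessExpr(expr, now, now, 0)
	if err != nil {
		panic(err)
	}
	// The visitor has evaluated (1h / 2) and set OriginalOffset to 30m.
	fmt.Println(out.(*parser.VectorSelector).OriginalOffset) // expected: 30m0s
}
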
type durationVisitor struct { - step time.Duration + step time.Duration + queryRange time.Duration } // Visit finds any duration expressions in AST Nodes and modifies the Node to @@ -121,6 +122,8 @@ func (v *durationVisitor) evaluateDurationExpr(expr parser.Expr) (float64, error switch n.Op { case parser.STEP: return float64(v.step.Seconds()), nil + case parser.RANGE: + return float64(v.queryRange.Seconds()), nil case parser.MIN: return math.Min(lhs, rhs), nil case parser.MAX: diff --git a/promql/durations_test.go b/promql/durations_test.go index 18592a0d0a..7a5e8f00a4 100644 --- a/promql/durations_test.go +++ b/promql/durations_test.go @@ -213,6 +213,37 @@ func TestCalculateDuration(t *testing.T) { }, expected: 3 * time.Second, }, + { + name: "range", + expr: &parser.DurationExpr{ + Op: parser.RANGE, + }, + expected: 5 * time.Minute, + }, + { + name: "range division", + expr: &parser.DurationExpr{ + LHS: &parser.DurationExpr{ + Op: parser.RANGE, + }, + RHS: &parser.NumberLiteral{Val: 2}, + Op: parser.DIV, + }, + expected: 150 * time.Second, + }, + { + name: "max of step and range", + expr: &parser.DurationExpr{ + LHS: &parser.DurationExpr{ + Op: parser.STEP, + }, + RHS: &parser.DurationExpr{ + Op: parser.RANGE, + }, + Op: parser.MAX, + }, + expected: 5 * time.Minute, + }, { name: "division by zero", expr: &parser.DurationExpr{ @@ -243,7 +274,7 @@ func TestCalculateDuration(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - v := &durationVisitor{step: 1 * time.Second} + v := &durationVisitor{step: 1 * time.Second, queryRange: 5 * time.Minute} result, err := v.calculateDuration(tt.expr, tt.allowedNegative) if tt.errorMessage != "" { require.Error(t, err) diff --git a/promql/engine.go b/promql/engine.go index 5a08da121c..a9f0dd2952 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -4057,7 +4057,7 @@ func unwrapStepInvariantExpr(e parser.Expr) parser.Expr { func PreprocessExpr(expr parser.Expr, start, end time.Time, step time.Duration) (parser.Expr, error) { detectHistogramStatsDecoding(expr) - if err := parser.Walk(&durationVisitor{step: step}, expr, nil); err != nil { + if err := parser.Walk(&durationVisitor{step: step, queryRange: end.Sub(start)}, expr, nil); err != nil { return nil, err } diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 67ecb190fe..8a1a094b79 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -116,8 +116,8 @@ type DurationExpr struct { LHS, RHS Expr // The operands on the respective sides of the operator. Wrapped bool // Set when the duration is wrapped in parentheses. - StartPos posrange.Pos // For unary operations and step(), the start position of the operator. - EndPos posrange.Pos // For step(), the end position of the operator. + StartPos posrange.Pos // For unary operations, step(), and range(), the start position of the operator. + EndPos posrange.Pos // For step() and range(), the end position of the operator. } // Call represents a function call. @@ -474,7 +474,7 @@ func (e *BinaryExpr) PositionRange() posrange.PositionRange { } func (e *DurationExpr) PositionRange() posrange.PositionRange { - if e.Op == STEP { + if e.Op == STEP || e.Op == RANGE { return posrange.PositionRange{ Start: e.StartPos, End: e.EndPos, diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index d9bbb10b28..47776f53d0 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -153,6 +153,7 @@ WITHOUT START END STEP +RANGE %token preprocessorEnd // Counter reset hints. 
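Together with the engine.go hunk above, `queryRange` is simply `end.Sub(start)`: a range query sees its full window, and an instant query (where start == end) sees `0s`, exactly as the feature-flags documentation states. A hedged sketch of both cases, under the same assumptions as the previous example (an illustration, not a test from this PR):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
)

// resolvedOffset preprocesses q for the given query window and returns the
// offset that range() resolved to.
func resolvedOffset(q string, start, end time.Time, step time.Duration) time.Duration {
	expr, err := parser.ParseExpr(q)
	if err != nil {
		panic(err)
	}
	out, err := promql.PreprocessExpr(expr, start, end, step)
	if err != nil {
		panic(err)
	}
	return out.(*parser.VectorSelector).OriginalOffset
}

func main() {
	parser.ExperimentalDurationExpr = true // assumed gate, as above

	start := time.Unix(0, 0)
	end := start.Add(5 * time.Minute)
	// Range query: queryRange = end - start, so range() resolves to 5m.
	fmt.Println(resolvedOffset(`metric offset range()`, start, end, 15*time.Second))
	// Instant query: start == end, so range() resolves to 0s.
	fmt.Println(resolvedOffset(`metric offset range()`, end, end, 0))
}
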
@@ -465,7 +466,7 @@ offset_expr: expr OFFSET offset_duration_expr $$ = $1 } | expr OFFSET error - { yylex.(*parser).unexpected("offset", "number, duration, or step()"); $$ = $1 } + { yylex.(*parser).unexpected("offset", "number, duration, step(), or range()"); $$ = $1 } ; /* @@ -575,11 +576,11 @@ subquery_expr : expr LEFT_BRACKET positive_duration_expr COLON positive_durati | expr LEFT_BRACKET positive_duration_expr COLON positive_duration_expr error { yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 } | expr LEFT_BRACKET positive_duration_expr COLON error - { yylex.(*parser).unexpected("subquery selector", "number, duration, or step() or \"]\""); $$ = $1 } + { yylex.(*parser).unexpected("subquery selector", "number, duration, step(), range(), or \"]\""); $$ = $1 } | expr LEFT_BRACKET positive_duration_expr error { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 } | expr LEFT_BRACKET error - { yylex.(*parser).unexpected("subquery or range selector", "number, duration, or step()"); $$ = $1 } + { yylex.(*parser).unexpected("subquery or range selector", "number, duration, step(), or range()"); $$ = $1 } ; /* @@ -696,7 +697,7 @@ metric : metric_identifier label_set ; -metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO | STEP | ANCHORED | SMOOTHED; +metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO | STEP | RANGE | ANCHORED | SMOOTHED; label_set : LEFT_BRACE label_set_list RIGHT_BRACE { $$ = labels.New($2...) } @@ -953,7 +954,7 @@ counter_reset_hint : UNKNOWN_COUNTER_RESET | COUNTER_RESET | NOT_COUNTER_RESET | aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK | LIMITK | LIMIT_RATIO; // Inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name. 
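Note that `RANGE` joins `metric_identifier` above (and `maybe_label`, listed next), mirroring how `STEP` was handled: the new keyword must not break series whose metric or label name is literally `range`. A small sketch of what keeps working, assuming the rebuilt parser from this diff:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// "range" is still a valid metric name...
	expr, err := parser.ParseExpr(`range{job="api"}`)
	fmt.Println(expr, err)

	// ...and still a valid label name in grouping clauses.
	expr, err = parser.ParseExpr(`sum by (range) (up)`)
	fmt.Println(expr, err)
}
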
-maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO | STEP | ANCHORED | SMOOTHED; +maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO | STEP | RANGE | ANCHORED | SMOOTHED; unary_op : ADD | SUB; @@ -1088,6 +1089,14 @@ offset_duration_expr : number_duration_literal EndPos: $3.PositionRange().End, } } + | RANGE LEFT_PAREN RIGHT_PAREN + { + $$ = &DurationExpr{ + Op: RANGE, + StartPos: $1.PositionRange().Start, + EndPos: $3.PositionRange().End, + } + } | unary_op STEP LEFT_PAREN RIGHT_PAREN { $$ = &DurationExpr{ @@ -1100,6 +1109,18 @@ offset_duration_expr : number_duration_literal StartPos: $1.Pos, } } + | unary_op RANGE LEFT_PAREN RIGHT_PAREN + { + $$ = &DurationExpr{ + Op: $1.Typ, + RHS: &DurationExpr{ + Op: RANGE, + StartPos: $2.PositionRange().Start, + EndPos: $4.PositionRange().End, + }, + StartPos: $1.Pos, + } + } | min_max LEFT_PAREN duration_expr COMMA duration_expr RIGHT_PAREN { $$ = &DurationExpr{ @@ -1234,6 +1255,14 @@ duration_expr : number_duration_literal EndPos: $3.PositionRange().End, } } + | RANGE LEFT_PAREN RIGHT_PAREN + { + $$ = &DurationExpr{ + Op: RANGE, + StartPos: $1.PositionRange().Start, + EndPos: $3.PositionRange().End, + } + } | min_max LEFT_PAREN duration_expr COMMA duration_expr RIGHT_PAREN { $$ = &DurationExpr{ diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index eb4b32129a..f5feec0b55 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -124,19 +124,20 @@ const preprocessorStart = 57431 const START = 57432 const END = 57433 const STEP = 57434 -const preprocessorEnd = 57435 -const counterResetHintsStart = 57436 -const UNKNOWN_COUNTER_RESET = 57437 -const COUNTER_RESET = 57438 -const NOT_COUNTER_RESET = 57439 -const GAUGE_TYPE = 57440 -const counterResetHintsEnd = 57441 -const startSymbolsStart = 57442 -const START_METRIC = 57443 -const START_SERIES_DESCRIPTION = 57444 -const START_EXPRESSION = 57445 -const START_METRIC_SELECTOR = 57446 -const startSymbolsEnd = 57447 +const RANGE = 57435 +const preprocessorEnd = 57436 +const counterResetHintsStart = 57437 +const UNKNOWN_COUNTER_RESET = 57438 +const COUNTER_RESET = 57439 +const NOT_COUNTER_RESET = 57440 +const GAUGE_TYPE = 57441 +const counterResetHintsEnd = 57442 +const startSymbolsStart = 57443 +const START_METRIC = 57444 +const START_SERIES_DESCRIPTION = 57445 +const START_EXPRESSION = 57446 +const START_METRIC_SELECTOR = 57447 +const startSymbolsEnd = 57448 var yyToknames = [...]string{ "$end", @@ -231,6 +232,7 @@ var yyToknames = [...]string{ "START", "END", "STEP", + "RANGE", "preprocessorEnd", "counterResetHintsStart", "UNKNOWN_COUNTER_RESET", @@ -256,344 +258,344 @@ var yyExca = [...]int16{ -1, 1, 1, -1, -2, 0, - -1, 40, - 1, 149, - 10, 149, - 24, 149, + -1, 41, + 1, 150, + 10, 150, + 24, 150, -2, 0, - -1, 70, - 2, 192, - 15, 192, - 79, 192, - 87, 192, - -2, 107, - -1, 71, + -1, 72, 2, 193, 15, 193, 79, 193, 87, 193, - -2, 108, - -1, 72, + -2, 107, + -1, 73, 2, 194, 15, 194, 79, 194, 87, 194, - -2, 110, - -1, 73, + -2, 108, + -1, 74, 2, 195, 15, 195, 79, 195, 87, 195, - -2, 111, - -1, 
74, + -2, 110, + -1, 75, 2, 196, 15, 196, 79, 196, 87, 196, - -2, 112, - -1, 75, + -2, 111, + -1, 76, 2, 197, 15, 197, 79, 197, 87, 197, - -2, 117, - -1, 76, + -2, 112, + -1, 77, 2, 198, 15, 198, 79, 198, 87, 198, - -2, 119, - -1, 77, + -2, 117, + -1, 78, 2, 199, 15, 199, 79, 199, 87, 199, - -2, 121, - -1, 78, + -2, 119, + -1, 79, 2, 200, 15, 200, 79, 200, 87, 200, - -2, 122, - -1, 79, + -2, 121, + -1, 80, 2, 201, 15, 201, 79, 201, 87, 201, - -2, 123, - -1, 80, + -2, 122, + -1, 81, 2, 202, 15, 202, 79, 202, 87, 202, - -2, 124, - -1, 81, + -2, 123, + -1, 82, 2, 203, 15, 203, 79, 203, 87, 203, - -2, 125, - -1, 82, + -2, 124, + -1, 83, 2, 204, 15, 204, 79, 204, 87, 204, - -2, 129, - -1, 83, + -2, 125, + -1, 84, 2, 205, 15, 205, 79, 205, 87, 205, + -2, 129, + -1, 85, + 2, 206, + 15, 206, + 79, 206, + 87, 206, -2, 130, - -1, 135, - 41, 270, - 42, 270, - 52, 270, - 53, 270, - 57, 270, + -1, 137, + 41, 274, + 42, 274, + 52, 274, + 53, 274, + 57, 274, -2, 22, - -1, 245, - 9, 257, - 12, 257, - 13, 257, - 18, 257, - 19, 257, - 25, 257, - 41, 257, - 47, 257, - 48, 257, - 51, 257, - 57, 257, - 62, 257, - 63, 257, - 64, 257, - 65, 257, - 66, 257, - 67, 257, - 68, 257, - 69, 257, - 70, 257, - 71, 257, - 72, 257, - 73, 257, - 74, 257, - 75, 257, - 79, 257, - 83, 257, - 84, 257, - 85, 257, - 87, 257, - 90, 257, - 91, 257, - 92, 257, + -1, 251, + 9, 259, + 12, 259, + 13, 259, + 18, 259, + 19, 259, + 25, 259, + 41, 259, + 47, 259, + 48, 259, + 51, 259, + 57, 259, + 62, 259, + 63, 259, + 64, 259, + 65, 259, + 66, 259, + 67, 259, + 68, 259, + 69, 259, + 70, 259, + 71, 259, + 72, 259, + 73, 259, + 74, 259, + 75, 259, + 79, 259, + 83, 259, + 84, 259, + 85, 259, + 87, 259, + 90, 259, + 91, 259, + 92, 259, + 93, 259, -2, 0, - -1, 246, - 9, 257, - 12, 257, - 13, 257, - 18, 257, - 19, 257, - 25, 257, - 41, 257, - 47, 257, - 48, 257, - 51, 257, - 57, 257, - 62, 257, - 63, 257, - 64, 257, - 65, 257, - 66, 257, - 67, 257, - 68, 257, - 69, 257, - 70, 257, - 71, 257, - 72, 257, - 73, 257, - 74, 257, - 75, 257, - 79, 257, - 83, 257, - 84, 257, - 85, 257, - 87, 257, - 90, 257, - 91, 257, - 92, 257, + -1, 252, + 9, 259, + 12, 259, + 13, 259, + 18, 259, + 19, 259, + 25, 259, + 41, 259, + 47, 259, + 48, 259, + 51, 259, + 57, 259, + 62, 259, + 63, 259, + 64, 259, + 65, 259, + 66, 259, + 67, 259, + 68, 259, + 69, 259, + 70, 259, + 71, 259, + 72, 259, + 73, 259, + 74, 259, + 75, 259, + 79, 259, + 83, 259, + 84, 259, + 85, 259, + 87, 259, + 90, 259, + 91, 259, + 92, 259, + 93, 259, -2, 0, } const yyPrivate = 57344 -const yyLast = 1071 +const yyLast = 1050 var yyAct = [...]int16{ - 57, 182, 401, 399, 185, 406, 278, 237, 193, 332, - 93, 47, 346, 141, 68, 221, 91, 413, 414, 415, - 416, 127, 128, 64, 156, 186, 66, 126, 347, 326, - 129, 243, 122, 125, 130, 244, 245, 246, 119, 122, - 118, 124, 123, 121, 327, 151, 124, 118, 214, 123, - 121, 396, 373, 124, 120, 364, 395, 366, 323, 385, - 328, 354, 352, 133, 216, 135, 6, 98, 100, 101, - 364, 102, 103, 104, 105, 106, 107, 108, 109, 110, - 111, 324, 112, 113, 117, 99, 42, 131, 315, 112, - 144, 117, 136, 400, 241, 350, 191, 143, 128, 349, - 142, 137, 270, 314, 322, 320, 129, 268, 317, 114, - 116, 115, 192, 95, 233, 178, 114, 116, 115, 195, - 199, 200, 201, 202, 203, 204, 174, 321, 319, 177, - 196, 196, 196, 196, 196, 196, 196, 232, 175, 217, - 267, 130, 197, 197, 197, 197, 197, 197, 197, 132, - 196, 134, 138, 205, 390, 407, 239, 207, 210, 227, - 206, 223, 197, 229, 428, 2, 3, 4, 5, 360, - 190, 194, 429, 389, 359, 7, 266, 240, 61, 86, - 189, 231, 269, 427, 181, 150, 426, 262, 60, 358, - 264, 
119, 122, 196, 425, 209, 271, 272, 266, 197, - 152, 225, 123, 121, 230, 197, 124, 120, 208, 196, - 84, 224, 226, 119, 122, 38, 384, 213, 222, 383, - 223, 197, 10, 382, 123, 121, 85, 235, 124, 120, - 143, 190, 88, 318, 238, 381, 180, 179, 241, 242, - 380, 189, 379, 378, 247, 248, 249, 250, 251, 252, - 253, 254, 255, 256, 257, 258, 259, 260, 261, 348, - 225, 198, 325, 191, 94, 377, 351, 376, 97, 353, - 224, 226, 344, 345, 92, 195, 375, 196, 374, 192, - 196, 39, 228, 355, 61, 55, 196, 95, 1, 197, - 181, 87, 197, 149, 60, 148, 172, 69, 197, 54, - 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, - 167, 168, 169, 170, 171, 417, 84, 362, 65, 53, - 190, 9, 9, 144, 52, 51, 363, 365, 196, 367, - 189, 155, 85, 142, 275, 368, 369, 184, 274, 50, - 197, 140, 180, 179, 190, 49, 95, 48, 372, 119, - 122, 386, 191, 273, 189, 8, 46, 153, 211, 40, - 123, 121, 196, 371, 124, 120, 392, 198, 192, 394, - 370, 388, 94, 45, 197, 154, 191, 402, 403, 404, - 398, 44, 92, 405, 43, 409, 408, 411, 410, 418, - 90, 281, 192, 56, 236, 95, 422, 316, 419, 420, - 196, 291, 361, 421, 393, 119, 122, 297, 329, 423, - 96, 391, 197, 234, 280, 276, 123, 121, 424, 89, - 124, 120, 412, 119, 122, 187, 188, 183, 431, 196, - 279, 119, 122, 58, 123, 121, 293, 294, 124, 120, - 295, 197, 123, 121, 139, 0, 124, 120, 308, 0, - 0, 282, 284, 286, 287, 288, 296, 298, 301, 302, - 303, 304, 305, 309, 310, 0, 281, 283, 285, 289, - 290, 292, 299, 313, 312, 300, 291, 0, 220, 306, - 307, 311, 297, 219, 0, 0, 277, 387, 0, 280, - 147, 0, 190, 61, 0, 146, 218, 0, 0, 265, - 0, 0, 189, 60, 430, 0, 119, 122, 145, 0, - 0, 293, 294, 0, 0, 295, 0, 123, 121, 0, - 0, 124, 120, 308, 191, 84, 282, 284, 286, 287, - 288, 296, 298, 301, 302, 303, 304, 305, 309, 310, - 192, 85, 283, 285, 289, 290, 292, 299, 313, 312, - 300, 180, 179, 0, 306, 307, 311, 61, 0, 118, - 59, 86, 0, 62, 0, 0, 22, 60, 0, 0, - 212, 0, 0, 63, 0, 0, 263, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 98, 100, 0, 84, - 0, 0, 0, 0, 0, 18, 19, 109, 110, 20, - 0, 112, 113, 117, 99, 85, 0, 0, 0, 0, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 0, 0, 0, 13, 114, 116, - 115, 24, 37, 36, 215, 30, 0, 0, 31, 32, - 67, 61, 41, 0, 59, 86, 0, 62, 0, 0, - 22, 60, 0, 119, 122, 0, 0, 63, 0, 0, - 0, 0, 0, 0, 123, 121, 0, 0, 124, 120, - 0, 357, 0, 84, 0, 0, 0, 0, 61, 18, - 19, 0, 0, 20, 181, 0, 0, 0, 60, 85, - 356, 0, 0, 0, 70, 71, 72, 73, 74, 75, - 76, 77, 78, 79, 80, 81, 82, 83, 0, 0, - 84, 13, 0, 0, 0, 24, 37, 36, 0, 30, - 0, 0, 31, 32, 67, 61, 85, 0, 59, 86, - 0, 62, 331, 0, 22, 60, 180, 179, 0, 330, - 0, 63, 0, 334, 335, 333, 340, 342, 339, 341, - 336, 337, 338, 343, 0, 0, 0, 84, 0, 0, - 0, 198, 0, 18, 19, 0, 0, 20, 0, 0, - 0, 0, 0, 85, 0, 0, 0, 0, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 83, 17, 86, 0, 13, 0, 0, 22, 24, - 37, 36, 397, 30, 0, 0, 31, 32, 67, 0, - 0, 0, 0, 334, 335, 333, 340, 342, 339, 341, - 336, 337, 338, 343, 0, 0, 0, 18, 19, 0, - 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 11, 12, 14, 15, 16, 21, 23, 25, - 26, 27, 28, 29, 33, 34, 17, 38, 0, 13, - 0, 0, 22, 24, 37, 36, 0, 30, 0, 0, - 31, 32, 35, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 18, 19, 0, 0, 20, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 11, 12, 14, 15, - 16, 21, 23, 25, 26, 27, 28, 29, 33, 34, - 118, 0, 0, 13, 0, 0, 0, 24, 37, 36, - 0, 30, 0, 0, 31, 32, 35, 0, 0, 118, - 0, 0, 0, 0, 0, 0, 0, 98, 100, 101, - 0, 102, 103, 104, 105, 106, 107, 108, 109, 110, - 111, 0, 112, 113, 117, 99, 98, 100, 101, 0, - 102, 103, 104, 0, 106, 107, 108, 109, 110, 111, - 173, 112, 113, 117, 99, 118, 0, 61, 0, 114, - 116, 115, 0, 181, 118, 
0, 0, 60, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 114, 116, - 115, 0, 98, 100, 101, 0, 102, 103, 0, 84, - 106, 107, 100, 109, 110, 111, 0, 112, 113, 117, - 99, 0, 109, 110, 0, 85, 112, 0, 117, 99, - 0, 0, 0, 0, 0, 180, 179, 0, 0, 0, - 0, 0, 0, 0, 114, 116, 115, 0, 0, 0, - 0, 0, 0, 114, 116, 115, 0, 0, 0, 0, - 176, + 58, 186, 413, 411, 341, 418, 286, 243, 197, 95, + 189, 48, 355, 144, 70, 227, 93, 251, 252, 356, + 159, 190, 65, 120, 17, 88, 127, 130, 128, 129, + 22, 425, 426, 427, 428, 131, 249, 121, 124, 335, + 250, 67, 132, 126, 408, 407, 377, 332, 125, 123, + 331, 102, 126, 122, 336, 154, 324, 6, 397, 18, + 19, 111, 112, 20, 135, 114, 137, 119, 101, 375, + 337, 323, 375, 330, 11, 12, 14, 15, 16, 21, + 23, 25, 26, 27, 28, 29, 33, 34, 43, 133, + 329, 13, 116, 118, 117, 24, 38, 37, 146, 30, + 402, 124, 31, 32, 35, 36, 130, 412, 138, 396, + 194, 125, 123, 328, 131, 126, 365, 182, 239, 401, + 193, 199, 204, 205, 206, 207, 208, 209, 177, 363, + 362, 181, 200, 200, 200, 200, 200, 200, 200, 178, + 120, 238, 223, 201, 201, 201, 201, 201, 201, 201, + 212, 215, 134, 200, 136, 211, 210, 2, 3, 4, + 5, 222, 233, 221, 201, 245, 235, 384, 333, 371, + 228, 247, 229, 360, 370, 359, 246, 358, 188, 273, + 140, 368, 114, 195, 119, 194, 277, 139, 62, 369, + 268, 237, 229, 271, 185, 193, 441, 200, 61, 196, + 367, 201, 273, 383, 155, 278, 279, 280, 201, 116, + 118, 117, 231, 200, 236, 121, 124, 195, 382, 440, + 86, 218, 230, 232, 201, 381, 125, 123, 276, 275, + 126, 122, 231, 196, 274, 146, 87, 132, 439, 327, + 429, 438, 230, 232, 248, 141, 184, 183, 419, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 334, 357, 191, 192, 214, 353, + 354, 202, 203, 361, 121, 124, 88, 364, 283, 7, + 39, 213, 282, 199, 200, 125, 123, 395, 200, 126, + 122, 366, 10, 194, 200, 201, 394, 281, 393, 201, + 392, 391, 90, 193, 390, 201, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 389, 194, 388, 120, 195, 373, 387, 386, 385, + 153, 99, 193, 62, 442, 374, 376, 200, 378, 185, + 56, 196, 40, 61, 379, 380, 89, 152, 201, 151, + 1, 100, 102, 103, 195, 104, 105, 175, 71, 108, + 109, 398, 111, 112, 113, 86, 114, 115, 119, 101, + 196, 66, 200, 55, 9, 9, 54, 404, 8, 53, + 406, 87, 41, 201, 52, 158, 410, 51, 414, 415, + 416, 184, 183, 116, 118, 117, 421, 420, 423, 422, + 417, 430, 50, 49, 289, 47, 156, 216, 147, 46, + 431, 432, 200, 372, 299, 433, 202, 203, 145, 96, + 305, 435, 157, 201, 403, 437, 326, 288, 147, 94, + 436, 97, 45, 44, 57, 242, 434, 234, 145, 338, + 443, 200, 97, 98, 121, 124, 143, 240, 284, 301, + 302, 97, 201, 303, 91, 125, 123, 424, 187, 126, + 122, 316, 287, 59, 290, 292, 294, 295, 296, 304, + 306, 309, 310, 311, 312, 313, 317, 318, 142, 0, + 291, 293, 297, 298, 300, 307, 322, 321, 308, 289, + 96, 0, 314, 315, 319, 320, 226, 150, 405, 299, + 94, 225, 149, 0, 0, 305, 0, 0, 92, 285, + 0, 0, 288, 97, 224, 148, 62, 121, 124, 0, + 0, 0, 272, 0, 0, 0, 61, 0, 125, 123, + 0, 0, 126, 122, 301, 302, 0, 0, 303, 0, + 0, 0, 0, 0, 0, 0, 316, 0, 86, 290, + 292, 294, 295, 296, 304, 306, 309, 310, 311, 312, + 313, 317, 318, 0, 87, 291, 293, 297, 298, 300, + 307, 322, 321, 308, 184, 183, 0, 314, 315, 319, + 320, 62, 0, 120, 60, 88, 0, 63, 0, 0, + 22, 61, 0, 0, 217, 0, 0, 64, 0, 269, + 270, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 100, 102, 0, 86, 0, 0, 0, 0, 0, 18, + 19, 111, 112, 20, 0, 114, 115, 119, 101, 87, + 0, 0, 0, 0, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 0, 0, + 400, 13, 116, 118, 117, 24, 38, 37, 399, 30, + 0, 0, 31, 32, 68, 69, 62, 42, 0, 60, + 88, 0, 63, 0, 0, 22, 61, 
121, 124, 0, + 0, 0, 64, 0, 121, 124, 0, 0, 125, 123, + 0, 0, 126, 122, 0, 125, 123, 0, 86, 126, + 122, 0, 0, 0, 18, 19, 0, 0, 20, 0, + 0, 0, 0, 0, 87, 0, 0, 0, 0, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, + 83, 84, 85, 0, 0, 0, 13, 0, 0, 220, + 24, 38, 37, 0, 30, 0, 325, 31, 32, 68, + 69, 62, 0, 0, 60, 88, 0, 63, 121, 124, + 22, 61, 0, 0, 0, 0, 0, 64, 0, 125, + 123, 0, 0, 126, 122, 0, 0, 0, 0, 0, + 121, 124, 0, 86, 0, 0, 0, 0, 0, 18, + 19, 125, 123, 20, 0, 126, 122, 0, 0, 87, + 0, 0, 0, 0, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 17, 39, + 0, 13, 0, 0, 22, 24, 38, 37, 0, 30, + 340, 0, 31, 32, 68, 69, 0, 339, 0, 0, + 0, 343, 344, 342, 349, 351, 348, 350, 345, 346, + 347, 352, 241, 18, 19, 0, 194, 20, 0, 244, + 0, 0, 0, 247, 0, 0, 193, 0, 11, 12, + 14, 15, 16, 21, 23, 25, 26, 27, 28, 29, + 33, 34, 0, 0, 120, 13, 0, 0, 195, 24, + 38, 37, 219, 30, 0, 0, 31, 32, 35, 36, + 0, 0, 0, 120, 196, 0, 0, 0, 0, 0, + 0, 100, 102, 103, 0, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 0, 114, 115, 119, 101, + 100, 102, 103, 0, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 198, 114, 115, 119, 101, 120, + 0, 62, 0, 116, 118, 117, 0, 185, 176, 0, + 0, 61, 0, 0, 0, 62, 0, 0, 0, 0, + 0, 185, 116, 118, 117, 61, 100, 102, 103, 0, + 104, 105, 106, 86, 108, 109, 110, 111, 112, 113, + 0, 114, 115, 119, 101, 0, 0, 86, 0, 87, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 184, + 183, 0, 0, 87, 0, 0, 0, 0, 116, 118, + 117, 0, 0, 184, 183, 409, 0, 0, 0, 0, + 0, 0, 0, 0, 202, 203, 343, 344, 342, 349, + 351, 348, 350, 345, 346, 347, 352, 0, 179, 180, } var yyPact = [...]int16{ - 64, 165, 844, 844, 632, 780, -1000, -1000, -1000, 202, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 55, 269, 806, 806, 657, 12, -1000, -1000, -1000, 267, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 370, -1000, - 266, -1000, 906, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -3, 19, 126, - -1000, -1000, 716, -1000, 716, 166, -1000, 86, 137, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 321, -1000, -1000, 488, - -1000, -1000, 291, 181, -1000, -1000, 21, -1000, -54, -54, - -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, - -54, -54, -54, -54, 978, -1000, -1000, 335, 169, 275, - 275, 275, 275, 275, 275, 126, -57, -1000, 193, 193, - 548, -1000, 26, 612, 33, -15, -1000, 42, 275, 476, - -1000, -1000, 216, 157, -1000, -1000, 262, -1000, 179, -1000, - 112, 222, 716, -1000, -51, -44, -1000, 716, 716, 716, - 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, - 716, 716, -1000, -1000, -1000, 484, 125, 92, -3, -1000, - -1000, 275, -1000, 87, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 161, 161, 332, -1000, -3, -1000, 275, 86, -10, - -10, -15, -15, -15, -15, -1000, -1000, -1000, 464, -1000, - -1000, 81, -1000, 906, -1000, -1000, -1000, 390, -1000, 88, - -1000, 103, -1000, -1000, -1000, -1000, -1000, 102, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 32, 55, 3, -1000, -1000, - -1000, 715, 980, 193, 193, 193, 193, 33, 33, 545, - 545, 545, 971, 925, 545, 545, 971, 33, 33, 545, - 33, 980, -1000, 84, 80, 275, -15, 40, 275, 612, - 39, -1000, -1000, -1000, 669, -1000, 167, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 488, + -1000, 329, -1000, 889, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -4, 27, + 222, 
-1000, -1000, 742, -1000, 742, 263, -1000, 172, 165, + 230, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 426, -1000, + -1000, 495, -1000, -1000, 345, 326, -1000, -1000, 31, -1000, + -58, -58, -58, -58, -58, -58, -58, -58, -58, -58, + -58, -58, -58, -58, -58, -58, 956, -1000, -1000, 176, + 942, 324, 324, 324, 324, 324, 324, 222, -52, -1000, + 266, 266, 572, -1000, 870, 717, 126, -13, -1000, 141, + 139, 324, 494, -1000, -1000, 168, 188, -1000, -1000, 417, + -1000, 189, -1000, 116, 847, 742, -1000, -46, -63, -1000, + 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, + 742, 742, 742, 742, 742, -1000, -1000, -1000, 507, 219, + 214, 213, -4, -1000, -1000, 324, -1000, 190, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 101, 101, 276, -1000, -4, + -1000, 324, 172, 165, 59, 59, -13, -13, -13, -13, + -1000, -1000, -1000, 487, -1000, -1000, 49, -1000, 889, -1000, + -1000, -1000, -1000, 739, -1000, 406, -1000, 88, -1000, -1000, + -1000, -1000, -1000, 48, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 21, 142, 13, -1000, -1000, -1000, 813, 9, 266, + 266, 266, 266, 126, 126, 569, 569, 569, 310, 935, + 569, 569, 310, 126, 126, 569, 126, 9, -1000, 162, + 160, 158, 324, -13, 108, 107, 324, 717, 94, -1000, + -1000, -1000, 179, -1000, 167, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 716, 275, -1000, -1000, -1000, - -1000, -1000, -1000, 51, 51, 31, 51, 78, 78, 346, - 35, -1000, -1000, 272, 270, 261, 259, 237, 236, 234, - 229, 217, 213, 210, -1000, -1000, -1000, -1000, -1000, 37, - 275, 465, -1000, 364, -1000, 152, -1000, -1000, -1000, 389, - -1000, 906, 382, -1000, -1000, -1000, 51, -1000, 30, 25, - 785, -1000, -1000, -1000, 36, 311, 311, 311, 161, 141, - 141, 36, 141, 36, -78, -1000, 308, -1000, 275, -1000, - -1000, -1000, -1000, -1000, -1000, 51, 51, -1000, -1000, -1000, - 51, -1000, -1000, -1000, -1000, -1000, -1000, 311, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 275, 172, -1000, - -1000, -1000, 162, -1000, 150, -1000, 483, -1000, -1000, -1000, - -1000, -1000, + -1000, -1000, -1000, -1000, 742, 324, -1000, -1000, -1000, -1000, + -1000, -1000, 53, 53, 20, 53, 155, 155, 201, 150, + -1000, -1000, 323, 322, 321, 317, 315, 298, 295, 294, + 292, 290, 281, -1000, -1000, -1000, -1000, -1000, 87, 36, + 324, 636, -1000, -1000, 643, -1000, 98, -1000, -1000, -1000, + 402, -1000, 889, 476, -1000, -1000, -1000, 53, -1000, 19, + 18, 1008, -1000, -1000, -1000, 50, 284, 284, 284, 101, + 234, 234, 50, 234, 50, -65, -1000, -1000, 233, -1000, + 324, -1000, -1000, -1000, -1000, -1000, -1000, 53, 53, -1000, + -1000, -1000, 53, -1000, -1000, -1000, -1000, -1000, -1000, 284, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 324, + 403, -1000, -1000, -1000, 217, -1000, 174, -1000, 313, -1000, + -1000, -1000, -1000, -1000, } var yyPgo = [...]int16{ - 0, 444, 13, 433, 6, 15, 430, 318, 23, 427, - 10, 422, 14, 222, 355, 419, 16, 415, 28, 12, - 413, 410, 7, 408, 9, 5, 396, 3, 2, 4, - 394, 25, 1, 393, 384, 33, 200, 381, 375, 86, - 373, 358, 27, 357, 26, 356, 11, 347, 345, 339, - 331, 325, 324, 319, 299, 285, 0, 297, 8, 296, - 288, 281, + 0, 478, 13, 463, 6, 15, 462, 371, 22, 458, + 9, 457, 14, 292, 378, 454, 16, 448, 19, 12, + 447, 443, 7, 439, 4, 5, 436, 3, 2, 10, + 435, 21, 1, 434, 
433, 26, 204, 432, 422, 88, + 409, 407, 28, 406, 41, 405, 11, 403, 402, 387, + 385, 384, 379, 376, 373, 340, 0, 358, 8, 357, + 350, 342, } var yyR1 = [...]int8{ @@ -610,22 +612,22 @@ var yyR1 = [...]int8{ 2, 2, 2, 2, 2, 14, 14, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 13, 13, 13, 13, 15, 15, - 15, 16, 16, 16, 16, 16, 16, 16, 61, 21, - 21, 21, 21, 20, 20, 20, 20, 20, 20, 20, - 20, 20, 30, 30, 30, 22, 22, 22, 22, 23, - 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 25, 25, 26, 26, 26, 11, 11, - 11, 11, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 13, 13, 13, 13, 15, + 15, 15, 16, 16, 16, 16, 16, 16, 16, 61, + 21, 21, 21, 21, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 30, 30, 30, 22, 22, 22, 22, + 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 25, 25, 26, 26, 26, 11, + 11, 11, 11, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 8, 8, - 5, 5, 5, 5, 46, 46, 29, 29, 31, 31, - 32, 32, 28, 27, 27, 52, 10, 19, 19, 59, - 59, 59, 59, 59, 59, 59, 59, 12, 12, 56, - 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, - 57, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 8, 8, 5, 5, 5, 5, 46, 46, 29, 29, + 31, 31, 32, 32, 28, 27, 27, 52, 10, 19, + 19, 59, 59, 59, 59, 59, 59, 59, 59, 59, + 59, 12, 12, 56, 56, 56, 56, 56, 56, 56, + 56, 56, 56, 56, 56, 57, } var yyR2 = [...]int8{ @@ -642,116 +644,118 @@ var yyR2 = [...]int8{ 1, 3, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 3, 4, 2, 0, 3, 1, - 2, 3, 3, 1, 3, 3, 2, 1, 2, 0, - 3, 2, 1, 1, 3, 1, 3, 4, 1, 3, - 5, 5, 1, 1, 1, 4, 3, 3, 2, 3, - 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 4, 3, 3, 1, 2, 1, 1, + 1, 1, 1, 1, 1, 3, 4, 2, 0, 3, + 1, 2, 3, 3, 1, 3, 3, 2, 1, 2, + 0, 3, 2, 1, 1, 3, 1, 3, 4, 1, + 3, 5, 5, 1, 1, 1, 4, 3, 3, 2, + 3, 1, 2, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 4, 3, 3, 1, 2, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, - 1, 1, 1, 2, 1, 1, 1, 0, 1, 1, - 2, 3, 4, 6, 7, 4, 1, 1, 1, 1, - 2, 3, 3, 3, 3, 3, 3, 3, 6, 1, - 3, + 2, 2, 1, 1, 1, 2, 1, 1, 1, 0, + 1, 1, 2, 3, 3, 4, 4, 6, 7, 4, + 1, 1, 1, 1, 2, 3, 3, 3, 3, 3, + 3, 3, 3, 6, 1, 3, } var yyChk = [...]int16{ - -1000, -60, 101, 102, 103, 104, 2, 10, -14, -7, + -1000, -60, 102, 103, 104, 105, 2, 10, -14, -7, -13, 62, 63, 79, 64, 65, 66, 12, 47, 48, 51, 67, 18, 68, 83, 69, 70, 71, 72, 73, - 87, 90, 91, 74, 75, 92, 85, 84, 13, -61, - -14, 10, -39, -34, -37, -40, -45, -46, -47, -48, - -49, -51, -52, -53, -54, -55, -33, -56, -3, 12, - 19, 9, 15, 25, -8, -7, -44, 92, -12, -57, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 41, 57, 13, -55, -13, -15, - 20, -16, 12, -10, 2, 25, -21, 2, 41, 59, - 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, - 53, 54, 56, 57, 83, 85, 84, 58, 14, 41, - 57, 53, 42, 52, 56, -35, -42, 2, 79, 87, - 15, -42, -39, -56, -39, -56, -44, 15, 15, -1, - 20, -2, 12, -10, 2, 20, 7, 2, 4, 2, - 4, 24, -36, -43, -38, -50, 78, -36, -36, -36, + 87, 90, 91, 74, 75, 92, 93, 85, 84, 13, + -61, -14, 10, -39, -34, -37, -40, -45, -46, -47, + -48, -49, -51, -52, -53, -54, -55, -33, -56, -3, + 12, 19, 9, 15, 25, -8, -7, -44, 92, 93, + -12, -57, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 41, 57, 13, -55, + -13, -15, 20, -16, 12, -10, 2, 25, -21, 2, + 41, 59, 42, 43, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 56, 57, 
83, 85, 84, 58, + 14, 41, 57, 53, 42, 52, 56, -35, -42, 2, + 79, 87, 15, -42, -39, -56, -39, -56, -44, 15, + 15, 15, -1, 20, -2, 12, -10, 2, 20, 7, + 2, 4, 2, 4, 24, -36, -43, -38, -50, 78, -36, -36, -36, -36, -36, -36, -36, -36, -36, -36, - -36, -36, -59, 2, -46, -8, 92, -12, -56, 68, - 67, 15, -32, -9, 2, -29, -31, 90, 91, 19, - 9, 41, 57, -58, 2, -56, -46, -8, 92, -56, - -56, -56, -56, -56, -56, -42, -35, -18, 15, 2, - -18, -41, 22, -39, 22, 22, 22, -56, 20, 7, - 2, -5, 2, 4, 54, 44, 55, -5, 20, -16, - 25, 2, 25, 2, -20, 5, -30, -22, 12, -29, - -31, 16, -39, 82, 86, 80, 81, -39, -39, -39, - -39, -39, -39, -39, -39, -39, -39, -39, -39, -39, - -39, -39, -46, 92, -12, 15, -56, 15, 15, -56, - 15, -29, -29, 21, 6, 2, -17, 22, -4, -6, - 25, 2, 62, 78, 63, 79, 64, 65, 66, 80, - 81, 12, 82, 47, 48, 51, 67, 18, 68, 83, - 86, 69, 70, 71, 72, 73, 90, 91, 59, 74, - 75, 92, 85, 84, 22, 7, 7, 20, -2, 25, - 2, 25, 2, 26, 26, -31, 26, 41, 57, -23, - 24, 17, -24, 30, 28, 29, 35, 36, 37, 33, - 31, 34, 32, 38, -18, -18, -19, -18, -19, 15, - 15, -56, 22, -56, 22, -58, 21, 2, 22, 7, - 2, -39, -56, -28, 19, -28, 26, -28, -22, -22, - 24, 17, 2, 17, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 22, -56, 22, 7, 21, - 2, 22, -4, 22, -28, 26, 26, 17, -24, -27, - 57, -28, -32, -32, -32, -29, -25, 14, -25, -27, - -25, -27, -11, 95, 96, 97, 98, 7, -56, -28, - -28, -28, -26, -32, -56, 22, 24, 21, 2, 22, - 21, -32, + -36, -36, -36, -36, -36, -59, 2, -46, -8, 92, + 93, -12, -56, 68, 67, 15, -32, -9, 2, -29, + -31, 90, 91, 19, 9, 41, 57, -58, 2, -56, + -46, -8, 92, 93, -56, -56, -56, -56, -56, -56, + -42, -35, -18, 15, 2, -18, -41, 22, -39, 22, + 22, 22, 22, -56, 20, 7, 2, -5, 2, 4, + 54, 44, 55, -5, 20, -16, 25, 2, 25, 2, + -20, 5, -30, -22, 12, -29, -31, 16, -39, 82, + 86, 80, 81, -39, -39, -39, -39, -39, -39, -39, + -39, -39, -39, -39, -39, -39, -39, -39, -46, 92, + 93, -12, 15, -56, 15, 15, 15, -56, 15, -29, + -29, 21, 6, 2, -17, 22, -4, -6, 25, 2, + 62, 78, 63, 79, 64, 65, 66, 80, 81, 12, + 82, 47, 48, 51, 67, 18, 68, 83, 86, 69, + 70, 71, 72, 73, 90, 91, 59, 74, 75, 92, + 93, 85, 84, 22, 7, 7, 20, -2, 25, 2, + 25, 2, 26, 26, -31, 26, 41, 57, -23, 24, + 17, -24, 30, 28, 29, 35, 36, 37, 33, 31, + 34, 32, 38, -18, -18, -19, -18, -19, 15, 15, + 15, -56, 22, 22, -56, 22, -58, 21, 2, 22, + 7, 2, -39, -56, -28, 19, -28, 26, -28, -22, + -22, 24, 17, 2, 17, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 22, 22, -56, 22, + 7, 21, 2, 22, -4, 22, -28, 26, 26, 17, + -24, -27, 57, -28, -32, -32, -32, -29, -25, 14, + -25, -27, -25, -27, -11, 96, 97, 98, 99, 7, + -56, -28, -28, -28, -26, -32, -56, 22, 24, 21, + 2, 22, 21, -32, } var yyDef = [...]int16{ - 0, -2, 137, 137, 0, 0, 7, 6, 1, 137, + 0, -2, 138, 138, 0, 0, 7, 6, 1, 138, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, - 126, 127, 128, 129, 130, 131, 132, 133, 0, 2, - -2, 3, 4, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 0, 113, - 244, 245, 0, 255, 0, 90, 91, 131, 0, 279, - -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - -2, -2, -2, -2, 238, 239, 0, 5, 105, 0, - 136, 139, 0, 143, 147, 256, 148, 152, 46, 46, + 126, 127, 128, 129, 130, 131, 132, 133, 134, 0, + 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 0, + 113, 246, 247, 0, 257, 0, 90, 91, 131, 132, + 0, 284, -2, -2, -2, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, 240, 241, 0, 5, + 105, 0, 137, 140, 0, 144, 148, 258, 149, 153, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, - 46, 46, 46, 46, 0, 74, 75, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 25, 26, 0, 0, - 0, 64, 0, 22, 
88, -2, 89, 0, 0, 0, - 94, 96, 0, 100, 104, 134, 0, 140, 0, 146, - 0, 151, 0, 45, 50, 51, 47, 0, 0, 0, + 46, 46, 46, 46, 46, 46, 0, 74, 75, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 25, 26, + 0, 0, 0, 64, 0, 22, 88, -2, 89, 0, + 0, 0, 0, 94, 96, 0, 100, 104, 135, 0, + 141, 0, 147, 0, 152, 0, 45, 50, 51, 47, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 72, 73, 259, 0, 0, 0, 266, 267, - 268, 0, 76, 0, 78, 250, 251, 79, 80, 246, - 247, 0, 0, 0, 87, 71, 269, 0, 0, 271, - 272, 273, 274, 275, 276, 23, 24, 27, 0, 57, - 28, 0, 66, 68, 70, 280, 277, 0, 92, 0, - 97, 0, 103, 240, 241, 242, 243, 0, 135, 138, - 141, 144, 142, 145, 150, 153, 155, 158, 162, 163, - 164, 0, 29, 0, 0, -2, -2, 30, 31, 32, - 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, - 43, 44, 260, 0, 0, 0, 270, 0, 0, 0, - 0, 248, 249, 81, 0, 86, 0, 56, 59, 61, - 62, 63, 206, 207, 208, 209, 210, 211, 212, 213, - 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, - 234, 235, 236, 237, 65, 69, 0, 93, 95, 98, - 102, 99, 101, 0, 0, 0, 0, 0, 0, 0, - 0, 168, 170, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 48, 49, 52, 258, 53, 0, - 0, 0, 261, 0, 77, 0, 83, 85, 54, 0, - 60, 67, 0, 154, 252, 156, 0, 159, 0, 0, - 0, 166, 171, 167, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 262, 0, 265, 0, 82, - 84, 55, 58, 278, 157, 0, 0, 165, 169, 172, - 0, 254, 173, 174, 175, 176, 177, 0, 178, 179, - 180, 181, 182, 188, 189, 190, 191, 0, 0, 160, - 161, 253, 0, 186, 0, 263, 0, 184, 187, 264, - 183, 185, + 0, 0, 0, 0, 0, 72, 73, 261, 0, 0, + 0, 0, 270, 271, 272, 0, 76, 0, 78, 252, + 253, 79, 80, 248, 249, 0, 0, 0, 87, 71, + 273, 0, 0, 0, 275, 276, 277, 278, 279, 280, + 23, 24, 27, 0, 57, 28, 0, 66, 68, 70, + 285, 281, 282, 0, 92, 0, 97, 0, 103, 242, + 243, 244, 245, 0, 136, 139, 142, 145, 143, 146, + 151, 154, 156, 159, 163, 164, 165, 0, 29, 0, + 0, -2, -2, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 262, 0, + 0, 0, 0, 274, 0, 0, 0, 0, 0, 250, + 251, 81, 0, 86, 0, 56, 59, 61, 62, 63, + 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 65, 69, 0, 93, 95, 98, 102, + 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, + 169, 171, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 48, 49, 52, 260, 53, 0, 0, + 0, 0, 263, 264, 0, 77, 0, 83, 85, 54, + 0, 60, 67, 0, 155, 254, 157, 0, 160, 0, + 0, 0, 167, 172, 168, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 265, 266, 0, 269, + 0, 82, 84, 55, 58, 283, 158, 0, 0, 166, + 170, 173, 0, 256, 174, 175, 176, 177, 178, 0, + 179, 180, 181, 182, 183, 189, 190, 191, 192, 0, + 0, 161, 162, 255, 0, 187, 0, 267, 0, 185, + 188, 268, 184, 186, } var yyTok1 = [...]int8{ @@ -769,7 +773,7 @@ var yyTok2 = [...]int8{ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, 104, 105, + 102, 103, 104, 105, 106, } var yyTok3 = [...]int8{ @@ -1434,7 +1438,7 @@ yydefault: case 73: yyDollar = yyS[yypt-3 : yypt+1] { - yylex.(*parser).unexpected("offset", "number, duration, or step()") + yylex.(*parser).unexpected("offset", "number, duration, step(), or range()") yyVAL.node = yyDollar[1].node } case 74: @@ -1541,7 +1545,7 @@ yydefault: case 85: yyDollar = yyS[yypt-5 : yypt+1] { - yylex.(*parser).unexpected("subquery selector", "number, duration, or step() or \"]\"") + yylex.(*parser).unexpected("subquery selector", "number, duration, step(), range(), or \"]\"") yyVAL.node = yyDollar[1].node } case 86: @@ -1553,7 +1557,7 @@ yydefault: case 87: yyDollar = yyS[yypt-3 : yypt+1] { 
- yylex.(*parser).unexpected("subquery or range selector", "number, duration, or step()") + yylex.(*parser).unexpected("subquery or range selector", "number, duration, step(), or range()") yyVAL.node = yyDollar[1].node } case 88: @@ -1691,82 +1695,82 @@ yydefault: { yyVAL.labels = yyDollar[1].labels } - case 134: + case 135: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) } - case 135: + case 136: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) } - case 136: + case 137: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.labels = labels.New() } - case 137: + case 138: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.labels = labels.New() } - case 138: + case 139: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label) } - case 139: + case 140: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.lblList = []labels.Label{yyDollar[1].label} } - case 140: + case 141: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.lblList = yyDollar[1].lblList } - case 141: + case 142: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 142: + case 143: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 143: + case 144: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.label = labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val} } - case 144: + case 145: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 145: + case 146: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 146: + case 147: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 147: + case 148: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 148: + case 149: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).generatedParserResult = &seriesDescription{ @@ -1774,33 +1778,33 @@ yydefault: values: yyDollar[2].series, } } - case 149: + case 150: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.series = []SequenceValue{} } - case 150: + case 151: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) 
} - case 151: + case 152: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.series = yyDollar[1].series } - case 152: + case 153: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 153: + case 154: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 154: + case 155: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1808,12 +1812,12 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 155: + case 156: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 156: + case 157: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1822,7 +1826,7 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 157: + case 158: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1832,12 +1836,12 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 158: + case 159: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}} } - case 159: + case 160: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1847,7 +1851,7 @@ yydefault: //$1 += $2 } } - case 160: + case 161: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1856,7 +1860,7 @@ yydefault: } yyVAL.series = val } - case 161: + case 162: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1865,7 +1869,7 @@ yydefault: } yyVAL.series = val } - case 162: + case 163: yyDollar = yyS[yypt-1 : yypt+1] { if yyDollar[1].item.Val != "stale" { @@ -1873,130 +1877,130 @@ yydefault: } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 165: + case 166: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 166: + case 167: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 167: + case 168: yyDollar = yyS[yypt-3 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } - case 168: + case 169: yyDollar = yyS[yypt-2 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } - case 169: + case 170: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) } - case 170: + case 171: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.descriptors = yyDollar[1].descriptors } - case 171: + case 172: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. 
buckets:[5 10 7]") } - case 172: + case 173: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["schema"] = yyDollar[3].int } - case 173: + case 174: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["sum"] = yyDollar[3].float } - case 174: + case 175: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["count"] = yyDollar[3].float } - case 175: + case 176: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["z_bucket"] = yyDollar[3].float } - case 176: + case 177: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float } - case 177: + case 178: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set } - case 178: + case 179: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } - case 179: + case 180: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["offset"] = yyDollar[3].int } - case 180: + case 181: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } - case 181: + case 182: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["n_offset"] = yyDollar[3].int } - case 182: + case 183: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item } - case 183: + case 184: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.bucket_set = yyDollar[2].bucket_set } - case 184: + case 185: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.bucket_set = yyDollar[2].bucket_set } - case 185: + case 186: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) } - case 186: + case 187: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.bucket_set = []float64{yyDollar[1].float} } - case 244: + case 246: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &NumberLiteral{ @@ -2004,7 +2008,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 245: + case 247: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -2019,12 +2023,12 @@ yydefault: Duration: true, } } - case 246: + case 248: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 247: + case 249: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -2035,17 +2039,17 @@ yydefault: } yyVAL.float = dur.Seconds() } - case 248: + case 250: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = yyDollar[2].float } - case 249: + case 251: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = -yyDollar[2].float } - case 252: + case 254: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -2054,17 +2058,17 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 253: + case 255: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.int = -int64(yyDollar[2].uint) } - case 254: + case 256: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.int = int64(yyDollar[1].uint) } - case 255: + case 257: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &StringLiteral{ @@ -2072,7 +2076,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 256: + case 258: yyDollar = yyS[yypt-1 : 
yypt+1] { yyVAL.item = Item{ @@ -2081,12 +2085,12 @@ yydefault: Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), } } - case 257: + case 259: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil } - case 259: + case 261: yyDollar = yyS[yypt-1 : yypt+1] { nl := yyDollar[1].node.(*NumberLiteral) @@ -2097,7 +2101,7 @@ yydefault: } yyVAL.node = nl } - case 260: + case 262: yyDollar = yyS[yypt-2 : yypt+1] { nl := yyDollar[2].node.(*NumberLiteral) @@ -2112,7 +2116,7 @@ yydefault: nl.PosRange.Start = yyDollar[1].item.Pos yyVAL.node = nl } - case 261: + case 263: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2121,7 +2125,16 @@ yydefault: EndPos: yyDollar[3].item.PositionRange().End, } } - case 262: + case 264: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.node = &DurationExpr{ + Op: RANGE, + StartPos: yyDollar[1].item.PositionRange().Start, + EndPos: yyDollar[3].item.PositionRange().End, + } + } + case 265: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2134,7 +2147,20 @@ yydefault: StartPos: yyDollar[1].item.Pos, } } - case 263: + case 266: + yyDollar = yyS[yypt-4 : yypt+1] + { + yyVAL.node = &DurationExpr{ + Op: yyDollar[1].item.Typ, + RHS: &DurationExpr{ + Op: RANGE, + StartPos: yyDollar[2].item.PositionRange().Start, + EndPos: yyDollar[4].item.PositionRange().End, + }, + StartPos: yyDollar[1].item.Pos, + } + } + case 267: yyDollar = yyS[yypt-6 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2145,7 +2171,7 @@ yydefault: RHS: yyDollar[5].node.(Expr), } } - case 264: + case 268: yyDollar = yyS[yypt-7 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2161,7 +2187,7 @@ yydefault: }, } } - case 265: + case 269: yyDollar = yyS[yypt-4 : yypt+1] { de := yyDollar[3].node.(*DurationExpr) @@ -2176,7 +2202,7 @@ yydefault: } yyVAL.node = yyDollar[3].node } - case 269: + case 273: yyDollar = yyS[yypt-1 : yypt+1] { nl := yyDollar[1].node.(*NumberLiteral) @@ -2187,7 +2213,7 @@ yydefault: } yyVAL.node = nl } - case 270: + case 274: yyDollar = yyS[yypt-2 : yypt+1] { switch expr := yyDollar[2].node.(type) { @@ -2220,25 +2246,25 @@ yydefault: break } } - case 271: + case 275: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) yyVAL.node = &DurationExpr{Op: ADD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 272: + case 276: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) yyVAL.node = &DurationExpr{Op: SUB, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 273: + case 277: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) yyVAL.node = &DurationExpr{Op: MUL, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 274: + case 278: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) @@ -2249,7 +2275,7 @@ yydefault: } yyVAL.node = &DurationExpr{Op: DIV, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 275: + case 279: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) @@ -2260,13 +2286,13 @@ yydefault: } yyVAL.node = &DurationExpr{Op: MOD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 276: + case 280: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) yyVAL.node = &DurationExpr{Op: POW, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 277: + case 281: yyDollar = yyS[yypt-3 : 
yypt+1] { yyVAL.node = &DurationExpr{ @@ -2275,7 +2301,16 @@ yydefault: EndPos: yyDollar[3].item.PositionRange().End, } } - case 278: + case 282: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.node = &DurationExpr{ + Op: RANGE, + StartPos: yyDollar[1].item.PositionRange().Start, + EndPos: yyDollar[3].item.PositionRange().End, + } + } + case 283: yyDollar = yyS[yypt-6 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2286,7 +2321,7 @@ yydefault: RHS: yyDollar[5].node.(Expr), } } - case 280: + case 285: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[2].node.(Expr)) diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 296b91d1ae..ad4b685150 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -143,6 +143,7 @@ var key = map[string]ItemType{ "start": START, "end": END, "step": STEP, + "range": RANGE, } var histogramDesc = map[string]ItemType{ @@ -915,6 +916,9 @@ func (l *Lexer) scanDurationKeyword() bool { case "step": l.emit(STEP) return true + case "range": + l.emit(RANGE) + return true case "min": l.emit(MIN) return true @@ -1175,7 +1179,7 @@ func lexDurationExpr(l *Lexer) stateFn { case r == ',': l.emit(COMMA) return lexDurationExpr - case r == 's' || r == 'S' || r == 'm' || r == 'M': + case r == 's' || r == 'S' || r == 'm' || r == 'M' || r == 'r' || r == 'R': if l.scanDurationKeyword() { return lexDurationExpr } diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index b5d7c288d1..62349efd93 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -2708,7 +2708,7 @@ var testExpr = []struct { errors: ParseErrors{ ParseErr{ PositionRange: posrange.PositionRange{Start: 4, End: 5}, - Err: errors.New("unexpected \"]\" in subquery or range selector, expected number, duration, or step()"), + Err: errors.New("unexpected \"]\" in subquery or range selector, expected number, duration, step(), or range()"), Query: `foo[]`, }, }, @@ -2741,7 +2741,7 @@ var testExpr = []struct { errors: ParseErrors{ ParseErr{ PositionRange: posrange.PositionRange{Start: 22, End: 22}, - Err: errors.New("unexpected end of input in offset, expected number, duration, or step()"), + Err: errors.New("unexpected end of input in offset, expected number, duration, step(), or range()"), Query: `some_metric[5m] OFFSET`, }, }, @@ -4698,6 +4698,100 @@ var testExpr = []struct { }, }, }, + { + input: `foo[range()]`, + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "foo", + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"), + }, + PosRange: posrange.PositionRange{Start: 0, End: 3}, + }, + RangeExpr: &DurationExpr{ + Op: RANGE, + StartPos: 4, + EndPos: 11, + }, + EndPos: 12, + }, + }, + { + input: `foo[-range()]`, + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "foo", + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"), + }, + PosRange: posrange.PositionRange{Start: 0, End: 3}, + }, + RangeExpr: &DurationExpr{ + Op: SUB, + StartPos: 4, + RHS: &DurationExpr{Op: RANGE, StartPos: 5, EndPos: 12}, + }, + EndPos: 13, + }, + }, + { + input: `foo offset range()`, + expected: &VectorSelector{ + Name: "foo", + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"), + }, + PosRange: posrange.PositionRange{Start: 0, End: 18}, + OriginalOffsetExpr: &DurationExpr{ + Op: RANGE, + StartPos: 11, + EndPos: 18, + }, + }, + }, + { + input: `foo offset -range()`, + 
expected: &VectorSelector{ + Name: "foo", + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"), + }, + PosRange: posrange.PositionRange{Start: 0, End: 19}, + OriginalOffsetExpr: &DurationExpr{ + Op: SUB, + RHS: &DurationExpr{Op: RANGE, StartPos: 12, EndPos: 19}, + StartPos: 11, + }, + }, + }, + { + input: `foo[max(range(),5s)]`, + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "foo", + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"), + }, + PosRange: posrange.PositionRange{Start: 0, End: 3}, + }, + RangeExpr: &DurationExpr{ + Op: MAX, + LHS: &DurationExpr{ + Op: RANGE, + StartPos: 8, + EndPos: 15, + }, + RHS: &NumberLiteral{ + Val: 5, + Duration: true, + PosRange: posrange.PositionRange{Start: 16, End: 18}, + }, + StartPos: 4, + EndPos: 19, + }, + EndPos: 20, + }, + }, { input: `foo[4s+4s:1s*2] offset (5s-8)`, expected: &SubqueryExpr{ @@ -4942,7 +5036,7 @@ var testExpr = []struct { errors: ParseErrors{ ParseErr{ PositionRange: posrange.PositionRange{Start: 8, End: 9}, - Err: errors.New(`unexpected "]" in subquery or range selector, expected number, duration, or step()`), + Err: errors.New(`unexpected "]" in subquery or range selector, expected number, duration, step(), or range()`), Query: `foo[step]`, }, }, diff --git a/promql/parser/printer.go b/promql/parser/printer.go index 961167428b..2531bb6272 100644 --- a/promql/parser/printer.go +++ b/promql/parser/printer.go @@ -182,6 +182,8 @@ func (node *DurationExpr) writeTo(b *bytes.Buffer) { switch { case node.Op == STEP: b.WriteString("step()") + case node.Op == RANGE: + b.WriteString("range()") case node.Op == MIN: b.WriteString("min(") b.WriteString(node.LHS.String()) diff --git a/promql/parser/printer_test.go b/promql/parser/printer_test.go index b28da988da..b7fa3e6ccb 100644 --- a/promql/parser/printer_test.go +++ b/promql/parser/printer_test.go @@ -266,6 +266,21 @@ func TestExprString(t *testing.T) { { in: "foo[200 - min(step() + 10s, -max(step() ^ 2, 3))]", }, + { + in: "foo[range()]", + }, + { + in: "foo[-range()]", + }, + { + in: "foo offset range()", + }, + { + in: "foo offset -range()", + }, + { + in: "foo[max(range(), 5s)]", + }, { in: `predict_linear(foo[1h], 3000)`, }, diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index d4a11b9e50..83e47f1915 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -1519,6 +1519,10 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq // Check query returns same result in range mode, // by checking against the middle step. + // Skip this check for queries containing range() since it would resolve differently. 
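+	// range() resolves to the query's (end - start): it is 0s for the instant query under test, but 2m for the synthetic range query built below, so the two would legitimately disagree.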
+ if strings.Contains(iq.expr, "range()") { + return nil + } q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute) if err != nil { return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err) diff --git a/promql/promqltest/testdata/duration_expression.test b/promql/promqltest/testdata/duration_expression.test index db8253777b..e58b34131b 100644 --- a/promql/promqltest/testdata/duration_expression.test +++ b/promql/promqltest/testdata/duration_expression.test @@ -225,4 +225,27 @@ eval range from 50s to 60s step 5s metric1_total offset max(3s,min(step(), 1s))+ {} 8047 8052 8057 eval range from 50s to 60s step 5s metric1_total offset -(min(step(), 2s)-5)+8000 - {} 8047 8052 8057 \ No newline at end of file + {} 8047 8052 8057 + +# Test range() function - resolves to query range (end - start). +# For a range query from 50s to 60s, range() = 10s. +eval range from 50s to 60s step 10s count_over_time(metric1_total[range()]) + {} 10 10 + +eval range from 50s to 60s step 5s count_over_time(metric1_total[range()]) + {} 10 10 10 + +eval range from 50s to 60s step 5s metric1_total offset range() + metric1_total{} 40 45 50 + +eval range from 50s to 60s step 5s metric1_total offset min(range(), 8s) + metric1_total{} 42 47 52 + +clear + +load 1s + metric1_total 0+1x100 + +# For an instant query (start == end), range() = 0s, offset 0s. +eval instant at 50s metric1_total offset range() + metric1_total{} 50 diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index ff7a7bf65a..dd5179b360 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -17,240 +17,127 @@ import ( "bytes" "context" "encoding/binary" - "fmt" - "math" - "strings" - "sync" + "net/http" "testing" + "time" "github.com/gogo/protobuf/proto" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/pool" + "github.com/prometheus/prometheus/util/teststorage" ) -type nopAppendable struct{} +// For readability. 
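+// (A teststorage.Sample bundles labels L, timestamp T, float value V, histogram H, and exemplars ES, which the assertions in these tests read directly.)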
+type sample = teststorage.Sample -func (nopAppendable) Appender(context.Context) storage.Appender { - return nopAppender{} -} - -type nopAppender struct{} - -func (nopAppender) SetOptions(*storage.AppendOptions) {} - -func (nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (storage.SeriesRef, error) { - return 1, nil -} - -func (nopAppender) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.Exemplar) (storage.SeriesRef, error) { - return 2, nil -} - -func (nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) { - return 3, nil -} - -func (nopAppender) AppendHistogramSTZeroSample(storage.SeriesRef, labels.Labels, int64, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) { - return 0, nil -} - -func (nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { - return 4, nil -} - -func (nopAppender) AppendSTZeroSample(storage.SeriesRef, labels.Labels, int64, int64) (storage.SeriesRef, error) { - return 5, nil -} - -func (nopAppender) Commit() error { return nil } -func (nopAppender) Rollback() error { return nil } - -type floatSample struct { - metric labels.Labels - t int64 - f float64 -} - -func equalFloatSamples(a, b floatSample) bool { - // Compare Float64bits so NaN values which are exactly the same will compare equal. - return labels.Equal(a.metric, b.metric) && a.t == b.t && math.Float64bits(a.f) == math.Float64bits(b.f) -} - -type histogramSample struct { - metric labels.Labels - t int64 - h *histogram.Histogram - fh *histogram.FloatHistogram -} - -type metadataEntry struct { - m metadata.Metadata - metric labels.Labels -} - -func metadataEntryEqual(a, b metadataEntry) bool { - if !labels.Equal(a.metric, b.metric) { - return false - } - if a.m.Type != b.m.Type { - return false - } - if a.m.Unit != b.m.Unit { - return false - } - if a.m.Help != b.m.Help { - return false - } - return true -} - -type collectResultAppendable struct { - *collectResultAppender -} - -func (a *collectResultAppendable) Appender(context.Context) storage.Appender { - return a -} - -// collectResultAppender records all samples that were added through the appender. -// It can be used as its zero value or be backed by another appender it writes samples through. -type collectResultAppender struct { - mtx sync.Mutex - - next storage.Appender - resultFloats []floatSample - pendingFloats []floatSample - rolledbackFloats []floatSample - resultHistograms []histogramSample - pendingHistograms []histogramSample - rolledbackHistograms []histogramSample - resultExemplars []exemplar.Exemplar - pendingExemplars []exemplar.Exemplar - resultMetadata []metadataEntry - pendingMetadata []metadataEntry -} - -func (*collectResultAppender) SetOptions(*storage.AppendOptions) {} - -func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - a.mtx.Lock() - defer a.mtx.Unlock() - a.pendingFloats = append(a.pendingFloats, floatSample{ - metric: lset, - t: t, - f: v, - }) - - if a.next == nil { - if ref == 0 { - // Use labels hash as a stand-in for unique series reference, to avoid having to track all series. 
- ref = storage.SeriesRef(lset.Hash()) - } - return ref, nil - } - - ref, err := a.next.Append(ref, lset, t, v) - if err != nil { - return 0, err +func withCtx(ctx context.Context) func(sl *scrapeLoop) { + return func(sl *scrapeLoop) { + sl.ctx = ctx } - return ref, nil -} - -func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { - a.mtx.Lock() - defer a.mtx.Unlock() - a.pendingExemplars = append(a.pendingExemplars, e) - if a.next == nil { - return 0, nil - } - - return a.next.AppendExemplar(ref, l, e) -} - -func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { - a.mtx.Lock() - defer a.mtx.Unlock() - a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t, metric: l}) - if a.next == nil { - return 0, nil - } - - return a.next.AppendHistogram(ref, l, t, h, fh) } -func (a *collectResultAppender) AppendHistogramSTZeroSample(ref storage.SeriesRef, l labels.Labels, _, st int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { - if h != nil { - return a.AppendHistogram(ref, l, st, &histogram.Histogram{}, nil) +func withAppendable(appendable storage.Appendable) func(sl *scrapeLoop) { + return func(sl *scrapeLoop) { + sl.appendable = appendable } - return a.AppendHistogram(ref, l, st, nil, &histogram.FloatHistogram{}) } -func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { - a.mtx.Lock() - defer a.mtx.Unlock() - a.pendingMetadata = append(a.pendingMetadata, metadataEntry{metric: l, m: m}) - if a.next == nil { - if ref == 0 { - ref = storage.SeriesRef(l.Hash()) - } - return ref, nil - } - - return a.next.UpdateMetadata(ref, l, m) -} - -func (a *collectResultAppender) AppendSTZeroSample(ref storage.SeriesRef, l labels.Labels, _, st int64) (storage.SeriesRef, error) { - return a.Append(ref, l, st, 0.0) -} - -func (a *collectResultAppender) Commit() error { - a.mtx.Lock() - defer a.mtx.Unlock() - a.resultFloats = append(a.resultFloats, a.pendingFloats...) - a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...) - a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...) - a.resultMetadata = append(a.resultMetadata, a.pendingMetadata...) - a.pendingFloats = nil - a.pendingExemplars = nil - a.pendingHistograms = nil - a.pendingMetadata = nil - if a.next == nil { - return nil - } - return a.next.Commit() -} - -func (a *collectResultAppender) Rollback() error { - a.mtx.Lock() - defer a.mtx.Unlock() - a.rolledbackFloats = a.pendingFloats - a.rolledbackHistograms = a.pendingHistograms - a.pendingFloats = nil - a.pendingHistograms = nil - if a.next == nil { - return nil +// newTestScrapeLoop creates the initial scrape loop for all tests. +// It returns a scrapeLoop and a mock scraper you can customize.
+// +// It's recommended to use withXYZ functions for simple option customizations, e.g.: +// +// appTest := teststorage.NewAppendable() +// sl, _ := newTestScrapeLoop(t, withAppendable(appTest)) +// +// However, when changing more than one scrapeLoop option, it's more readable to have one explicit opt function: +// +// ctx, cancel := context.WithCancel(t.Context()) +// appTest := teststorage.NewAppendable() +// sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) { +// sl.ctx = ctx +// sl.appendable = appTest +// // Since we're writing samples directly below, we need to provide a protocol fallback. +// sl.fallbackScrapeProtocol = "text/plain" +// }) +// +// NOTE: Try to NOT add more parameters to this function. Try to NOT add more +// newTestScrapeLoop-like constructors. It should be flexible enough with scrapeLoop +// used for the initial options. +func newTestScrapeLoop(t testing.TB, opts ...func(sl *scrapeLoop)) (_ *scrapeLoop, scraper *testScraper) { + metrics := newTestScrapeMetrics(t) + sl := &scrapeLoop{ + stopped: make(chan struct{}), + + l: promslog.NewNopLogger(), + cache: newScrapeCache(metrics), + + interval: 10 * time.Millisecond, + timeout: 1 * time.Hour, + sampleMutator: nopMutator, + reportSampleMutator: nopMutator, + + appendable: teststorage.NewAppendable(), + buffers: pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) }), + metrics: metrics, + maxSchema: histogram.ExponentialSchemaMax, + honorTimestamps: true, + enableCompression: true, + validationScheme: model.UTF8Validation, + symbolTable: labels.NewSymbolTable(), + appendMetadataToWAL: true, // Tests assume it's enabled, unless explicitly turned off. } + for _, o := range opts { + o(sl) } + // Validate user opts for convenience. + require.Nil(t, sl.parentCtx, "newTestScrapeLoop does not support injecting non-nil parent context") + require.Nil(t, sl.appenderCtx, "newTestScrapeLoop does not support injecting non-nil appender context") + require.Nil(t, sl.cancel, "newTestScrapeLoop does not support injecting custom cancel function") + require.Nil(t, sl.scraper, "newTestScrapeLoop does not support injecting a scraper; it's mocked, use the returned scraper") + + rootCtx := t.Context() + // Use sl.ctx for context injection.
+ // The actual contexts (sl.appenderCtx, sl.parentCtx, sl.ctx) are populated from it. + if sl.ctx != nil { + rootCtx = sl.ctx + } + ctx, cancel := context.WithCancel(rootCtx) + sl.ctx = ctx + sl.cancel = cancel + sl.appenderCtx = rootCtx + sl.parentCtx = rootCtx + + scraper = &testScraper{} + sl.scraper = scraper + return sl, scraper +} + +func newTestScrapePool(t *testing.T, injectNewLoop func(options scrapeLoopOptions) loop) *scrapePool { + return &scrapePool{ + ctx: t.Context(), + cancel: func() {}, + logger: promslog.NewNopLogger(), + config: &config.ScrapeConfig{}, + options: &Options{}, + client: http.DefaultClient, + + activeTargets: map[uint64]*Target{}, + loops: map[uint64]loop{}, + injectTestNewLoop: injectNewLoop, + + appendable: teststorage.NewAppendable(), + symbolTable: labels.NewSymbolTable(), + metrics: newTestScrapeMetrics(t), } } // protoMarshalDelimited marshals a MetricFamily into a delimited diff --git a/scrape/manager.go b/scrape/manager.go index 9bb6988df9..bd68c186c0 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -39,8 +39,8 @@ import ( "github.com/prometheus/prometheus/util/pool" ) -// NewManager is the Manager constructor. -func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error), app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { +// NewManager is the Manager constructor using Appendable. +func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error), appendable storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { if o == nil { o = &Options{} } @@ -54,7 +54,7 @@ } m := &Manager{ - append: app, + appendable: appendable, opts: o, logger: logger, newScrapeFailureLogger: newScrapeFailureLogger, @@ -87,15 +87,15 @@ type Options struct { // Option to enable appending of scraped Metadata to the TSDB/other appenders. Individual appenders // can decide what to do with metadata, but for practical purposes this flag exists so that metadata // can be written to the WAL and thus read for remote write. - // TODO: implement some form of metadata storage AppendMetadata bool // Option to increase the interval used by scrape manager to throttle target groups updates. DiscoveryReloadInterval model.Duration + // Option to enable the ingestion of the created timestamp as a synthetic zero sample. // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md EnableStartTimestampZeroIngestion bool - // EnableTypeAndUnitLabels + // EnableTypeAndUnitLabels represents the type-and-unit-labels feature flag. EnableTypeAndUnitLabels bool // Optional HTTP client options to use when scraping. @@ -111,9 +111,11 @@ // Manager maintains a set of scrape pools and manages start/stop cycles // when receiving new target groups from the discovery manager.
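+// Every scrape pool the Manager creates appends through the single storage.Appendable it holds.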
type Manager struct { - opts *Options - logger *slog.Logger - append storage.Appendable + opts *Options + logger *slog.Logger + + appendable storage.Appendable + graceShut chan struct{} offsetSeed uint64 // Global offsetSeed seed is used to spread scrape workload across HA setup. @@ -194,7 +196,7 @@ func (m *Manager) reload() { continue } m.metrics.targetScrapePools.Inc() - sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics) + sp, err := newScrapePool(scrapeConfig, m.appendable, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics) if err != nil { m.metrics.targetScrapePoolsFailed.Inc() m.logger.Error("error creating new scrape pool", "err", err, "scrape_pool", setName) diff --git a/scrape/manager_test.go b/scrape/manager_test.go index 1ec4875d19..d4898eb996 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -51,6 +51,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/runutil" + "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" ) @@ -527,21 +528,12 @@ scrape_configs: ch <- struct{}{} return noopLoop() } - sp := &scrapePool{ - appendable: &nopAppendable{}, - activeTargets: map[uint64]*Target{ - 1: {}, - }, - loops: map[uint64]loop{ - 1: noopLoop(), - }, - newLoop: newLoop, - logger: nil, - config: cfg1.ScrapeConfigs[0], - client: http.DefaultClient, - metrics: scrapeManager.metrics, - symbolTable: labels.NewSymbolTable(), - } + sp := newTestScrapePool(t, newLoop) + sp.activeTargets[1] = &Target{} + sp.loops[1] = noopLoop() + sp.config = cfg1.ScrapeConfigs[0] + sp.metrics = scrapeManager.metrics + scrapeManager.scrapePools = map[string]*scrapePool{ "job1": sp, } @@ -691,18 +683,11 @@ scrape_configs: for _, sc := range cfg.ScrapeConfigs { _, cancel := context.WithCancel(context.Background()) defer cancel() - sp := &scrapePool{ - appendable: &nopAppendable{}, - activeTargets: map[uint64]*Target{}, - loops: map[uint64]loop{ - 1: noopLoop(), - }, - newLoop: newLoop, - logger: nil, - config: sc, - client: http.DefaultClient, - cancel: cancel, - } + + sp := newTestScrapePool(t, newLoop) + sp.loops[1] = noopLoop() + sp.config = cfg1.ScrapeConfigs[0] + sp.metrics = scrapeManager.metrics for _, c := range sc.ServiceDiscoveryConfigs { staticConfig := c.(discovery.StaticConfig) for _, group := range staticConfig { @@ -764,7 +749,7 @@ func TestManagerSTZeroIngestion(t *testing.T) { for _, testWithST := range []bool{false, true} { t.Run(fmt.Sprintf("withST=%v", testWithST), func(t *testing.T) { for _, testSTZeroIngest := range []bool{false, true} { - t.Run(fmt.Sprintf("ctZeroIngest=%v", testSTZeroIngest), func(t *testing.T) { + t.Run(fmt.Sprintf("stZeroIngest=%v", testSTZeroIngest), func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -777,11 +762,11 @@ func TestManagerSTZeroIngestion(t *testing.T) { // TODO(bwplotka): Add more types than just counter? 
encoded := prepareTestEncodedCounter(t, testFormat, expectedMetricName, expectedSampleValue, sampleTs, stTs) - app := &collectResultAppender{} + app := teststorage.NewAppendable() discoveryManager, scrapeManager := runManagers(t, ctx, &Options{ EnableStartTimestampZeroIngestion: testSTZeroIngest, skipOffsetting: true, - }, &collectResultAppendable{app}) + }, app) defer scrapeManager.Stop() server := setupTestServer(t, config.ScrapeProtocolsHeaders[testFormat], encoded) @@ -806,11 +791,8 @@ scrape_configs: ctx, cancel = context.WithTimeout(ctx, 1*time.Minute) defer cancel() require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { - app.mtx.Lock() - defer app.mtx.Unlock() - // Check if scrape happened and grab the relevant samples. - if len(app.resultFloats) > 0 { + if len(app.ResultSamples()) > 0 { return nil } return errors.New("expected some float samples, got none") @@ -818,32 +800,32 @@ scrape_configs: // Verify results. // Verify what we got vs expectations around ST injection. - samples := findSamplesForMetric(app.resultFloats, expectedMetricName) + got := findSamplesForMetric(app.ResultSamples(), expectedMetricName) if testWithST && testSTZeroIngest { - require.Len(t, samples, 2) - require.Equal(t, 0.0, samples[0].f) - require.Equal(t, timestamp.FromTime(stTs), samples[0].t) - require.Equal(t, expectedSampleValue, samples[1].f) - require.Equal(t, timestamp.FromTime(sampleTs), samples[1].t) + require.Len(t, got, 2) + require.Equal(t, 0.0, got[0].V) + require.Equal(t, timestamp.FromTime(stTs), got[0].T) + require.Equal(t, expectedSampleValue, got[1].V) + require.Equal(t, timestamp.FromTime(sampleTs), got[1].T) } else { - require.Len(t, samples, 1) - require.Equal(t, expectedSampleValue, samples[0].f) - require.Equal(t, timestamp.FromTime(sampleTs), samples[0].t) + require.Len(t, got, 1) + require.Equal(t, expectedSampleValue, got[0].V) + require.Equal(t, timestamp.FromTime(sampleTs), got[0].T) } // Verify what we got vs expectations around additional _created series for OM text. // enableSTZeroInjection also kills that _created line. - createdSeriesSamples := findSamplesForMetric(app.resultFloats, expectedCreatedMetricName) + gotSTSeries := findSamplesForMetric(app.ResultSamples(), expectedCreatedMetricName) if testFormat == config.OpenMetricsText1_0_0 && testWithST && !testSTZeroIngest { // For OM Text, when counter has ST, and feature flag disabled we should see _created lines. - require.Len(t, createdSeriesSamples, 1) + require.Len(t, gotSTSeries, 1) // Conversion taken from common/expfmt.writeOpenMetricsFloat. 
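+ // (_created is exposed as a float of Unix seconds, hence the UnixNano()/1e9 conversion below.)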
// We don't check the st timestamp as explicit ts was not implemented in expfmt.Encoder, // but exists in OM https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#:~:text=An%20example%20with%20a%20Metric%20with%20no%20labels%2C%20and%20a%20MetricPoint%20with%20a%20timestamp%20and%20a%20created // We can implement this, but we want to potentially get rid of OM 1.0 ST lines - require.Equal(t, float64(timestamppb.New(stTs).AsTime().UnixNano())/1e9, createdSeriesSamples[0].f) + require.Equal(t, float64(timestamppb.New(stTs).AsTime().UnixNano())/1e9, gotSTSeries[0].V) } else { - require.Empty(t, createdSeriesSamples) + require.Empty(t, gotSTSeries) } }) } @@ -885,9 +867,9 @@ func prepareTestEncodedCounter(t *testing.T, format config.ScrapeProtocol, mName } } -func findSamplesForMetric(floats []floatSample, metricName string) (ret []floatSample) { +func findSamplesForMetric(floats []sample, metricName string) (ret []sample) { for _, f := range floats { - if f.metric.Get(model.MetricNameLabel) == metricName { + if f.L.Get(model.MetricNameLabel) == metricName { ret = append(ret, f) } } @@ -964,11 +946,11 @@ func TestManagerSTZeroIngestionHistogram(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - app := &collectResultAppender{} + app := teststorage.NewAppendable() discoveryManager, scrapeManager := runManagers(t, ctx, &Options{ EnableStartTimestampZeroIngestion: tc.enableSTZeroIngestion, skipOffsetting: true, - }, &collectResultAppendable{app}) + }, app) defer scrapeManager.Stop() once := sync.Once{} @@ -1012,43 +994,33 @@ scrape_configs: `, serverURL.Host) applyConfig(t, testConfig, scrapeManager, discoveryManager) - var got []histogramSample - // Wait for one scrape. ctx, cancel = context.WithTimeout(ctx, 1*time.Minute) defer cancel() require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { - app.mtx.Lock() - defer app.mtx.Unlock() - - // Check if scrape happened and grab the relevant histograms, they have to be there - or it's a bug - // and it's not worth waiting. - for _, h := range app.resultHistograms { - if h.metric.Get(model.MetricNameLabel) == mName { - got = append(got, h) - } - } - if len(app.resultHistograms) > 0 { + if len(app.ResultSamples()) > 0 { return nil } return errors.New("expected some histogram samples, got none") }), "after 1 minute") + got := findSamplesForMetric(app.ResultSamples(), mName) + // Check for zero samples, assuming we only injected always one histogram sample. // Did it contain ST to inject? If yes, was ST zero enabled? if tc.inputHistSample.CreatedTimestamp.IsValid() && tc.enableSTZeroIngestion { require.Len(t, got, 2) // Zero sample. - require.Equal(t, histogram.Histogram{}, *got[0].h) + require.Equal(t, histogram.Histogram{}, *got[0].H) // Quick soft check to make sure it's the same sample or at least not zero. - require.Equal(t, tc.inputHistSample.GetSampleSum(), got[1].h.Sum) + require.Equal(t, tc.inputHistSample.GetSampleSum(), got[1].H.Sum) return } // Expect only one, valid sample. require.Len(t, got, 1) // Quick soft check to make sure it's the same sample or at least not zero. 
- require.Equal(t, tc.inputHistSample.GetSampleSum(), got[0].h.Sum) + require.Equal(t, tc.inputHistSample.GetSampleSum(), got[0].H.Sum) }) } } @@ -1083,11 +1055,11 @@ func TestNHCBAndSTZeroIngestion(t *testing.T) { ctx := t.Context() - app := &collectResultAppender{} + app := teststorage.NewAppendable() discoveryManager, scrapeManager := runManagers(t, ctx, &Options{ EnableStartTimestampZeroIngestion: true, skipOffsetting: true, - }, &collectResultAppendable{app}) + }, app) defer scrapeManager.Stop() once := sync.Once{} @@ -1146,33 +1118,19 @@ scrape_configs: return exists }, 5*time.Second, 100*time.Millisecond, "scrape pool should be created for job 'test'") - // Helper function to get matching histograms to avoid race conditions. - getMatchingHistograms := func() []histogramSample { - app.mtx.Lock() - defer app.mtx.Unlock() - - var got []histogramSample - for _, h := range app.resultHistograms { - if h.metric.Get(model.MetricNameLabel) == mName { - got = append(got, h) - } - } - return got - } - require.Eventually(t, func() bool { - return len(getMatchingHistograms()) > 0 + return len(app.ResultSamples()) > 0 }, 1*time.Minute, 100*time.Millisecond, "expected histogram samples, got none") // Verify that samples were ingested (proving both features work together). - got := getMatchingHistograms() + got := findSamplesForMetric(app.ResultSamples(), mName) // With ST zero ingestion enabled and a created timestamp present, we expect 2 samples: // one zero sample and one actual sample. require.Len(t, got, 2, "expected 2 histogram samples (zero sample + actual sample)") - require.Equal(t, histogram.Histogram{}, *got[0].h, "first sample should be zero sample") - require.InDelta(t, expectedHistogramSum, got[1].h.Sum, 1e-9, "second sample should retain the expected sum") - require.Len(t, app.resultExemplars, 2, "expected 2 exemplars from histogram buckets") + require.Equal(t, histogram.Histogram{}, *got[0].H, "first sample should be zero sample") + require.InDelta(t, expectedHistogramSum, got[1].H.Sum, 1e-9, "second sample should retain the expected sum") + require.Len(t, got[1].ES, 2, "expected 2 exemplars on second histogram") } func applyConfig( @@ -1203,7 +1161,7 @@ func runManagers(t *testing.T, ctx context.Context, opts *Options, app storage.A } opts.DiscoveryReloadInterval = model.Duration(100 * time.Millisecond) if app == nil { - app = nopAppendable{} + app = teststorage.NewAppendable() } reg := prometheus.NewRegistry() @@ -1601,7 +1559,7 @@ scrape_configs: cfg := loadConfiguration(t, cfgText) - m, err := NewManager(&Options{}, nil, nil, &nopAppendable{}, prometheus.NewRegistry()) + m, err := NewManager(&Options{}, nil, nil, teststorage.NewAppendable(), prometheus.NewRegistry()) require.NoError(t, err) defer m.Stop() require.NoError(t, m.ApplyConfig(cfg)) diff --git a/scrape/scrape.go b/scrape/scrape.go index b653873bad..33683b4caf 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1,4 +1,4 @@ -// Copyright 2016 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -59,6 +59,8 @@ import ( "github.com/prometheus/prometheus/util/pool" ) +var aOptionRejectEarlyOOO = storage.AppendOptions{DiscardOutOfOrder: true} + // ScrapeTimestampTolerance is the tolerance for scrape appends timestamps // alignment, to enable better compression at the TSDB level. 
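+// (Scrape timestamps within this tolerance of the interval-aligned time are snapped to it, keeping samples evenly spaced and cheap to delta-encode.)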
// See https://github.com/prometheus/prometheus/issues/7846 @@ -67,7 +69,7 @@ var ScrapeTimestampTolerance = 2 * time.Millisecond // AlignScrapeTimestamps enables the tolerance for scrape appends timestamps described above. var AlignScrapeTimestamps = true -var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels.MetricName) +var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", model.MetricNameLabel) var _ FailureLogger = (*logging.JSONFileLogger)(nil) @@ -82,8 +84,9 @@ type FailureLogger interface { type scrapePool struct { appendable storage.Appendable logger *slog.Logger + ctx context.Context cancel context.CancelFunc - httpOpts []config_util.HTTPClientOption + options *Options // mtx must not be taken after targetMtx. mtx sync.Mutex @@ -102,16 +105,15 @@ type scrapePool struct { droppedTargets []*Target // Subject to KeepDroppedTargets limit. droppedTargetsCount int // Count of all dropped targets. - // Constructor for new scrape loops. This is settable for testing convenience. - newLoop func(scrapeLoopOptions) loop + // newLoop injection for testing purposes. + injectTestNewLoop func(scrapeLoopOptions) loop - metrics *scrapeMetrics + metrics *scrapeMetrics + buffers *pool.Pool + offsetSeed uint64 scrapeFailureLogger FailureLogger scrapeFailureLoggerMtx sync.RWMutex - - validationScheme model.ValidationScheme - escapingScheme model.EscapingScheme } type labelLimits struct { @@ -120,118 +122,80 @@ type labelLimits struct { labelValueLengthLimit int } -type scrapeLoopOptions struct { - target *Target - scraper scraper - sampleLimit int - bucketLimit int - maxSchema int32 - labelLimits *labelLimits - honorLabels bool - honorTimestamps bool - trackTimestampsStaleness bool - interval time.Duration - timeout time.Duration - scrapeNativeHist bool - alwaysScrapeClassicHist bool - convertClassicHistToNHCB bool - fallbackScrapeProtocol string - - mrc []*relabel.Config - cache *scrapeCache - enableCompression bool -} - const maxAheadTime = 10 * time.Minute // returning an empty label set is interpreted as "drop". type labelsMutator func(labels.Labels) labels.Labels -func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger *slog.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { +// scrapeLoopAppendAdapter allows support for multiple storage.Appender versions. +type scrapeLoopAppendAdapter interface { + Commit() error + Rollback() error + + addReportSample(s reportSample, t int64, v float64, b *labels.Builder, rejectOOO bool) error + append(b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) +} + +func newScrapePool( + cfg *config.ScrapeConfig, + appendable storage.Appendable, + offsetSeed uint64, + logger *slog.Logger, + buffers *pool.Pool, + options *Options, + metrics *scrapeMetrics, +) (*scrapePool, error) { if logger == nil { logger = promslog.NewNopLogger() } + if buffers == nil { + buffers = pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) }) + } client, err := newScrapeClient(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...) if err != nil { return nil, err } + // Validate scheme so we don't need to do it later. + // We also do it on scrapePool.reload(...) + // TODO(bwplotka): Can we move it to scrape config validation? 
if err := namevalidationutil.CheckNameValidationScheme(cfg.MetricNameValidationScheme); err != nil { return nil, errors.New("newScrapePool: MetricNameValidationScheme must be set in scrape configuration") } - var escapingScheme model.EscapingScheme - escapingScheme, err = config.ToEscapingScheme(cfg.MetricNameEscapingScheme, cfg.MetricNameValidationScheme) - if err != nil { + if _, err = config.ToEscapingScheme(cfg.MetricNameEscapingScheme, cfg.MetricNameValidationScheme); err != nil { return nil, fmt.Errorf("invalid metric name escaping scheme, %w", err) } + symbols := labels.NewSymbolTable() ctx, cancel := context.WithCancel(context.Background()) sp := &scrapePool{ + appendable: appendable, + logger: logger, + ctx: ctx, cancel: cancel, - appendable: app, + options: options, config: cfg, client: client, - activeTargets: map[uint64]*Target{}, loops: map[uint64]loop{}, - symbolTable: labels.NewSymbolTable(), + symbolTable: symbols, lastSymbolTableCheck: time.Now(), - logger: logger, + activeTargets: map[uint64]*Target{}, metrics: metrics, - httpOpts: options.HTTPClientOptions, - validationScheme: cfg.MetricNameValidationScheme, - escapingScheme: escapingScheme, - } - sp.newLoop = func(opts scrapeLoopOptions) loop { - // Update the targets retrieval function for metadata to a new scrape cache. - cache := opts.cache - if cache == nil { - cache = newScrapeCache(metrics) - } - opts.target.SetMetadataStore(cache) - - return newScrapeLoop( - ctx, - opts.scraper, - logger.With("target", opts.target), - buffers, - func(l labels.Labels) labels.Labels { - return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc) - }, - func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) }, - func(ctx context.Context) storage.Appender { return app.Appender(ctx) }, - cache, - sp.symbolTable, - offsetSeed, - opts.honorTimestamps, - opts.trackTimestampsStaleness, - opts.enableCompression, - opts.sampleLimit, - opts.bucketLimit, - opts.maxSchema, - opts.labelLimits, - opts.interval, - opts.timeout, - opts.alwaysScrapeClassicHist, - opts.convertClassicHistToNHCB, - cfg.ScrapeNativeHistogramsEnabled(), - options.EnableStartTimestampZeroIngestion, - options.EnableTypeAndUnitLabels, - options.ExtraMetrics, - options.AppendMetadata, - opts.target, - options.PassMetadataInContext, - metrics, - options.skipOffsetting, - sp.validationScheme, - sp.escapingScheme, - opts.fallbackScrapeProtocol, - ) + buffers: buffers, + offsetSeed: offsetSeed, } sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) return sp, nil } +func (sp *scrapePool) newLoop(opts scrapeLoopOptions) loop { + if sp.injectTestNewLoop != nil { + return sp.injectTestNewLoop(opts) + } + return newScrapeLoop(opts) +} + func (sp *scrapePool) ActiveTargets() []*Target { sp.targetMtx.Lock() defer sp.targetMtx.Unlock() @@ -323,7 +287,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { sp.metrics.targetScrapePoolReloads.Inc() start := time.Now() - client, err := newScrapeClient(cfg.HTTPClientConfig, cfg.JobName, sp.httpOpts...) + client, err := newScrapeClient(cfg.HTTPClientConfig, cfg.JobName, sp.options.HTTPClientOptions...) if err != nil { sp.metrics.targetScrapePoolReloadsFailed.Inc() return err @@ -333,17 +297,14 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { sp.config = cfg oldClient := sp.client sp.client = client + + // Validate scheme so we don't need to do it later. 
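+ // Later config.ToEscapingScheme calls (e.g. in restartLoops and sync) can then safely ignore the error.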
if err := namevalidationutil.CheckNameValidationScheme(cfg.MetricNameValidationScheme); err != nil { return errors.New("scrapePool.reload: MetricNameValidationScheme must be set in scrape configuration") } - sp.validationScheme = cfg.MetricNameValidationScheme - var escapingScheme model.EscapingScheme - escapingScheme, err = model.ToEscapingScheme(cfg.MetricNameEscapingScheme) - if err != nil { - return fmt.Errorf("invalid metric name escaping scheme, %w", err) + if _, err = config.ToEscapingScheme(cfg.MetricNameEscapingScheme, cfg.MetricNameValidationScheme); err != nil { + return fmt.Errorf("scrapePool.reload: invalid metric name escaping scheme, %w", err) } - sp.escapingScheme = escapingScheme - sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) sp.restartLoops(reuseCache) @@ -355,30 +316,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { } func (sp *scrapePool) restartLoops(reuseCache bool) { - var ( - wg sync.WaitGroup - interval = time.Duration(sp.config.ScrapeInterval) - timeout = time.Duration(sp.config.ScrapeTimeout) - bodySizeLimit = int64(sp.config.BodySizeLimit) - sampleLimit = int(sp.config.SampleLimit) - bucketLimit = int(sp.config.NativeHistogramBucketLimit) - maxSchema = pickSchema(sp.config.NativeHistogramMinBucketFactor) - labelLimits = &labelLimits{ - labelLimit: int(sp.config.LabelLimit), - labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), - labelValueLengthLimit: int(sp.config.LabelValueLengthLimit), - } - honorLabels = sp.config.HonorLabels - honorTimestamps = sp.config.HonorTimestamps - enableCompression = sp.config.EnableCompression - trackTimestampsStaleness = sp.config.TrackTimestampsStaleness - mrc = sp.config.MetricRelabelConfigs - fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() - scrapeNativeHist = sp.config.ScrapeNativeHistogramsEnabled() - alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistogramsEnabled() - convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled() - ) - + var wg sync.WaitGroup sp.targetMtx.Lock() forcedErr := sp.refreshTargetLimitErr() @@ -392,38 +330,27 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { } t := sp.activeTargets[fp] - targetInterval, targetTimeout, err := t.intervalAndTimeout(interval, timeout) - var ( - s = &targetScraper{ + targetInterval, targetTimeout, err := t.intervalAndTimeout( + time.Duration(sp.config.ScrapeInterval), + time.Duration(sp.config.ScrapeTimeout), + ) + escapingScheme, _ := config.ToEscapingScheme(sp.config.MetricNameEscapingScheme, sp.config.MetricNameValidationScheme) + newLoop := sp.newLoop(scrapeLoopOptions{ + target: t, + scraper: &targetScraper{ Target: t, client: sp.client, timeout: targetTimeout, - bodySizeLimit: bodySizeLimit, - acceptHeader: acceptHeader(sp.config.ScrapeProtocols, sp.escapingScheme), - acceptEncodingHeader: acceptEncodingHeader(enableCompression), + bodySizeLimit: int64(sp.config.BodySizeLimit), + acceptHeader: acceptHeader(sp.config.ScrapeProtocols, escapingScheme), + acceptEncodingHeader: acceptEncodingHeader(sp.config.EnableCompression), metrics: sp.metrics, - } - newLoop = sp.newLoop(scrapeLoopOptions{ - target: t, - scraper: s, - sampleLimit: sampleLimit, - bucketLimit: bucketLimit, - maxSchema: maxSchema, - labelLimits: labelLimits, - honorLabels: honorLabels, - honorTimestamps: honorTimestamps, - enableCompression: enableCompression, - trackTimestampsStaleness: trackTimestampsStaleness, - mrc: mrc, - cache: cache, - interval: 
targetInterval, - timeout: targetTimeout, - fallbackScrapeProtocol: fallbackScrapeProtocol, - scrapeNativeHist: scrapeNativeHist, - alwaysScrapeClassicHist: alwaysScrapeClassicHist, - convertClassicHistToNHCB: convertClassicHistToNHCB, - }) - ) + }, + cache: cache, + interval: targetInterval, + timeout: targetTimeout, + sp: sp, + }) if err != nil { newLoop.setForcedError(err) } @@ -516,31 +443,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { // scrape loops for new targets, and stops scrape loops for disappeared targets. // It returns after all stopped scrape loops terminated. func (sp *scrapePool) sync(targets []*Target) { - var ( - uniqueLoops = make(map[uint64]loop) - interval = time.Duration(sp.config.ScrapeInterval) - timeout = time.Duration(sp.config.ScrapeTimeout) - bodySizeLimit = int64(sp.config.BodySizeLimit) - sampleLimit = int(sp.config.SampleLimit) - bucketLimit = int(sp.config.NativeHistogramBucketLimit) - maxSchema = pickSchema(sp.config.NativeHistogramMinBucketFactor) - labelLimits = &labelLimits{ - labelLimit: int(sp.config.LabelLimit), - labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), - labelValueLengthLimit: int(sp.config.LabelValueLengthLimit), - } - honorLabels = sp.config.HonorLabels - honorTimestamps = sp.config.HonorTimestamps - enableCompression = sp.config.EnableCompression - trackTimestampsStaleness = sp.config.TrackTimestampsStaleness - mrc = sp.config.MetricRelabelConfigs - fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() - scrapeNativeHist = sp.config.ScrapeNativeHistogramsEnabled() - alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistogramsEnabled() - convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled() - ) + uniqueLoops := make(map[uint64]loop) sp.targetMtx.Lock() + escapingScheme, _ := config.ToEscapingScheme(sp.config.MetricNameEscapingScheme, sp.config.MetricNameValidationScheme) for _, t := range targets { hash := t.hash() @@ -549,34 +455,25 @@ func (sp *scrapePool) sync(targets []*Target) { // so whether changed via relabeling or not, they'll exist and hold the correct values // for every target. 
var err error - interval, timeout, err = t.intervalAndTimeout(interval, timeout) - s := &targetScraper{ - Target: t, - client: sp.client, - timeout: timeout, - bodySizeLimit: bodySizeLimit, - acceptHeader: acceptHeader(sp.config.ScrapeProtocols, sp.escapingScheme), - acceptEncodingHeader: acceptEncodingHeader(enableCompression), - metrics: sp.metrics, - } + targetInterval, targetTimeout, err := t.intervalAndTimeout( + time.Duration(sp.config.ScrapeInterval), + time.Duration(sp.config.ScrapeTimeout), + ) l := sp.newLoop(scrapeLoopOptions{ - target: t, - scraper: s, - sampleLimit: sampleLimit, - bucketLimit: bucketLimit, - maxSchema: maxSchema, - labelLimits: labelLimits, - honorLabels: honorLabels, - honorTimestamps: honorTimestamps, - enableCompression: enableCompression, - trackTimestampsStaleness: trackTimestampsStaleness, - mrc: mrc, - interval: interval, - timeout: timeout, - scrapeNativeHist: scrapeNativeHist, - alwaysScrapeClassicHist: alwaysScrapeClassicHist, - convertClassicHistToNHCB: convertClassicHistToNHCB, - fallbackScrapeProtocol: fallbackScrapeProtocol, + target: t, + scraper: &targetScraper{ + Target: t, + client: sp.client, + timeout: targetTimeout, + bodySizeLimit: int64(sp.config.BodySizeLimit), + acceptHeader: acceptHeader(sp.config.ScrapeProtocols, escapingScheme), + acceptEncodingHeader: acceptEncodingHeader(sp.config.EnableCompression), + metrics: sp.metrics, + }, + cache: newScrapeCache(sp.metrics), + interval: targetInterval, + timeout: targetTimeout, + sp: sp, }) if err != nil { l.setForcedError(err) @@ -661,7 +558,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error { return nil } - met := lset.Get(labels.MetricName) + met := lset.Get(model.MetricNameLabel) if limits.labelLimit > 0 { nbLabels := lset.Len() if nbLabels > limits.labelLimit { @@ -749,8 +646,8 @@ func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels return lb.Labels() } -// appender returns an appender for ingested samples from the target. -func appender(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int32) storage.Appender { +// appenderWithLimits returns an appender with additional validation. +func appenderWithLimits(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int32) storage.Appender { app = &timeLimitAppender{ Appender: app, maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), @@ -927,55 +824,63 @@ type cacheEntry struct { } type scrapeLoop struct { - scraper scraper - l *slog.Logger - scrapeFailureLogger FailureLogger - scrapeFailureLoggerMtx sync.RWMutex - cache *scrapeCache - lastScrapeSize int - buffers *pool.Pool - offsetSeed uint64 - honorTimestamps bool - trackTimestampsStaleness bool - enableCompression bool - forcedErr error - forcedErrMtx sync.Mutex - sampleLimit int - bucketLimit int - maxSchema int32 - labelLimits *labelLimits - interval time.Duration - timeout time.Duration - validationScheme model.ValidationScheme - escapingScheme model.EscapingScheme - - alwaysScrapeClassicHist bool - convertClassicHistToNHCB bool - enableSTZeroIngestion bool - enableTypeAndUnitLabels bool - fallbackScrapeProtocol string - - enableNativeHistogramScraping bool - - appender func(ctx context.Context) storage.Appender - symbolTable *labels.SymbolTable - sampleMutator labelsMutator - reportSampleMutator labelsMutator - - parentCtx context.Context - appenderCtx context.Context + // Parameters. 
ctx context.Context cancel func() stopped chan struct{} + parentCtx context.Context + appenderCtx context.Context + l *slog.Logger + cache *scrapeCache - disabledEndOfRunStalenessMarkers atomic.Bool - - reportExtraMetrics bool - appendMetadataToWAL bool - - metrics *scrapeMetrics + interval time.Duration + timeout time.Duration + sampleMutator labelsMutator + reportSampleMutator labelsMutator + scraper scraper + + // Static params per scrapePool. + appendable storage.Appendable + buffers *pool.Pool + offsetSeed uint64 + symbolTable *labels.SymbolTable + metrics *scrapeMetrics + + // Options from config.ScrapeConfig. + sampleLimit int + bucketLimit int + maxSchema int32 + labelLimits *labelLimits + honorLabels bool + honorTimestamps bool + trackTimestampsStaleness bool + enableNativeHistogramScraping bool + alwaysScrapeClassicHist bool + convertClassicHistToNHCB bool + fallbackScrapeProtocol string + enableCompression bool + mrc []*relabel.Config + validationScheme model.ValidationScheme + + // Options from scrape.Options. + enableSTZeroIngestion bool + enableTypeAndUnitLabels bool + reportExtraMetrics bool + appendMetadataToWAL bool + passMetadataInContext bool + skipOffsetting bool // For testability. + + // Error injection through setForcedError. + forcedErr error + forcedErrMtx sync.Mutex + + // Special logger, set via setScrapeFailureLogger. + scrapeFailureLoggerMtx sync.RWMutex + scrapeFailureLogger FailureLogger - skipOffsetting bool // For testability. + // Locally cached data. + lastScrapeSize int + disabledEndOfRunStalenessMarkers atomic.Bool } // scrapeCache tracks mappings of exposed metric strings to label sets and @@ -1000,8 +905,8 @@ type scrapeCache struct { seriesCur map[storage.SeriesRef]*cacheEntry seriesPrev map[storage.SeriesRef]*cacheEntry - // TODO(bwplotka): Consider moving Metadata API to use WAL instead of scrape loop to - // avoid locking (using metadata API can block scraping). + // TODO(bwplotka): Consider moving metadata caching to head. See + // https://github.com/prometheus/prometheus/issues/17619. metaMtx sync.Mutex // Mutex is needed due to api touching it when metadata is queried. metadata map[string]*metaEntry // metadata by metric family name.
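The field regrouping above is what the test helpers later in this diff lean on: newTestScrapeLoop(t, opts...) builds a loop with defaults and then lets each test mutate exactly one field group through a functional option. A minimal, self-contained sketch of that construction pattern, using toy names rather than the real scrapeLoop:

// Illustrative only: toy stand-ins for scrapeLoop and the
// newTestScrapeLoop(t, opts...) helper used by the tests below.
package main

import (
	"fmt"
	"time"
)

// loopCfg mimics one of scrapeLoop's field groups ("Options from config.ScrapeConfig").
type loopCfg struct {
	sampleLimit int
	timeout     time.Duration
}

// newTestLoop builds a loop config with test defaults, then applies each
// functional option so a test can override exactly the knob it cares about.
func newTestLoop(opts ...func(*loopCfg)) *loopCfg {
	c := &loopCfg{sampleLimit: 0, timeout: time.Hour}
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	l := newTestLoop(func(c *loopCfg) { c.sampleLimit = 100 })
	fmt.Println(l.sampleLimit, l.timeout) // 100 1h0m0s
}

This shape keeps test setup local to each test: defaults live in one constructor, and every option documents the single field it overrides.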
@@ -1236,99 +1141,87 @@ func (c *scrapeCache) LengthMetadata() int { return len(c.metadata) } -func newScrapeLoop(ctx context.Context, - sc scraper, - l *slog.Logger, - buffers *pool.Pool, - sampleMutator labelsMutator, - reportSampleMutator labelsMutator, - appender func(ctx context.Context) storage.Appender, - cache *scrapeCache, - symbolTable *labels.SymbolTable, - offsetSeed uint64, - honorTimestamps bool, - trackTimestampsStaleness bool, - enableCompression bool, - sampleLimit int, - bucketLimit int, - maxSchema int32, - labelLimits *labelLimits, - interval time.Duration, - timeout time.Duration, - alwaysScrapeClassicHist bool, - convertClassicHistToNHCB bool, - enableNativeHistogramScraping bool, - enableSTZeroIngestion bool, - enableTypeAndUnitLabels bool, - reportExtraMetrics bool, - appendMetadataToWAL bool, - target *Target, - passMetadataInContext bool, - metrics *scrapeMetrics, - skipOffsetting bool, - validationScheme model.ValidationScheme, - escapingScheme model.EscapingScheme, - fallbackScrapeProtocol string, -) *scrapeLoop { - if l == nil { - l = promslog.NewNopLogger() - } - if buffers == nil { - buffers = pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) }) - } - if cache == nil { - cache = newScrapeCache(metrics) - } +// scrapeLoopOptions contains static options that do not change during the scrapePool lifecycle. +type scrapeLoopOptions struct { + target *Target + scraper scraper + cache *scrapeCache + interval, timeout time.Duration + + sp *scrapePool +} - appenderCtx := ctx +// newScrapeLoop constructs a new scrapeLoop. +// NOTE: Technically this could be a scrapePool method, but it's a standalone function to make it clear scrapeLoop +// can be used outside the scrapePool lifecycle (e.g. in tests). +func newScrapeLoop(opts scrapeLoopOptions) *scrapeLoop { + // Update the target's retrieval function for metadata to the new scrape cache. + opts.target.SetMetadataStore(opts.cache) - if passMetadataInContext { + appenderCtx := opts.sp.ctx + if opts.sp.options.PassMetadataInContext { // Store the cache and target in the context. This is then used by downstream OTel Collector // to lookup the metadata required to process the samples. Not used by Prometheus itself. // TODO(gouthamve) We're using a dedicated context because using the parentCtx caused a memory // leak. We should ideally fix the main leak.
See: https://github.com/prometheus/prometheus/pull/10590 - appenderCtx = ContextWithMetricMetadataStore(appenderCtx, cache) - appenderCtx = ContextWithTarget(appenderCtx, target) - } - - sl := &scrapeLoop{ - scraper: sc, - buffers: buffers, - cache: cache, - appender: appender, - symbolTable: symbolTable, - sampleMutator: sampleMutator, - reportSampleMutator: reportSampleMutator, - stopped: make(chan struct{}), - offsetSeed: offsetSeed, - l: l, - parentCtx: ctx, - appenderCtx: appenderCtx, - honorTimestamps: honorTimestamps, - trackTimestampsStaleness: trackTimestampsStaleness, - enableCompression: enableCompression, - sampleLimit: sampleLimit, - bucketLimit: bucketLimit, - maxSchema: maxSchema, - labelLimits: labelLimits, - interval: interval, - timeout: timeout, - alwaysScrapeClassicHist: alwaysScrapeClassicHist, - convertClassicHistToNHCB: convertClassicHistToNHCB, - enableSTZeroIngestion: enableSTZeroIngestion, - enableTypeAndUnitLabels: enableTypeAndUnitLabels, - fallbackScrapeProtocol: fallbackScrapeProtocol, - enableNativeHistogramScraping: enableNativeHistogramScraping, - reportExtraMetrics: reportExtraMetrics, - appendMetadataToWAL: appendMetadataToWAL, - metrics: metrics, - skipOffsetting: skipOffsetting, - validationScheme: validationScheme, - escapingScheme: escapingScheme, - } - sl.ctx, sl.cancel = context.WithCancel(ctx) - - return sl + // TODO(bwplotka): Remove once OpenTelemetry collector uses AppenderV2 (add issue) + appenderCtx = ContextWithMetricMetadataStore(appenderCtx, opts.cache) + appenderCtx = ContextWithTarget(appenderCtx, opts.target) + } + + ctx, cancel := context.WithCancel(opts.sp.ctx) + return &scrapeLoop{ + ctx: ctx, + cancel: cancel, + stopped: make(chan struct{}), + parentCtx: opts.sp.ctx, + appenderCtx: appenderCtx, + l: opts.sp.logger.With("target", opts.target), + cache: opts.cache, + + interval: opts.interval, + timeout: opts.timeout, + sampleMutator: func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, opts.target, opts.sp.config.HonorLabels, opts.sp.config.MetricRelabelConfigs) + }, + reportSampleMutator: func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) }, + scraper: opts.scraper, + + // Static params per scrapePool. + appendable: opts.sp.appendable, + buffers: opts.sp.buffers, + offsetSeed: opts.sp.offsetSeed, + symbolTable: opts.sp.symbolTable, + metrics: opts.sp.metrics, + + // config.ScrapeConfig. + sampleLimit: int(opts.sp.config.SampleLimit), + bucketLimit: int(opts.sp.config.NativeHistogramBucketLimit), + maxSchema: pickSchema(opts.sp.config.NativeHistogramMinBucketFactor), + labelLimits: &labelLimits{ + labelLimit: int(opts.sp.config.LabelLimit), + labelNameLengthLimit: int(opts.sp.config.LabelNameLengthLimit), + labelValueLengthLimit: int(opts.sp.config.LabelValueLengthLimit), + }, + honorLabels: opts.sp.config.HonorLabels, + honorTimestamps: opts.sp.config.HonorTimestamps, + trackTimestampsStaleness: opts.sp.config.TrackTimestampsStaleness, + enableNativeHistogramScraping: opts.sp.config.ScrapeNativeHistogramsEnabled(), + alwaysScrapeClassicHist: opts.sp.config.AlwaysScrapeClassicHistogramsEnabled(), + convertClassicHistToNHCB: opts.sp.config.ConvertClassicHistogramsToNHCBEnabled(), + fallbackScrapeProtocol: opts.sp.config.ScrapeFallbackProtocol.HeaderMediaType(), + enableCompression: opts.sp.config.EnableCompression, + mrc: opts.sp.config.MetricRelabelConfigs, + validationScheme: opts.sp.config.MetricNameValidationScheme, + + // scrape.Options. 
+ enableSTZeroIngestion: opts.sp.options.EnableStartTimestampZeroIngestion, + enableTypeAndUnitLabels: opts.sp.options.EnableTypeAndUnitLabels, + reportExtraMetrics: opts.sp.options.ExtraMetrics, + appendMetadataToWAL: opts.sp.options.AppendMetadata, + passMetadataInContext: opts.sp.options.PassMetadataInContext, + skipOffsetting: opts.sp.options.skipOffsetting, + } } func (sl *scrapeLoop) setScrapeFailureLogger(l FailureLogger) { @@ -1407,6 +1300,11 @@ mainLoop: } } +func (sl *scrapeLoop) appender() scrapeLoopAppendAdapter { + // NOTE(bwplotka): Add AppenderV2 implementation, see https://github.com/prometheus/prometheus/issues/17632. + return &scrapeLoopAppender{scrapeLoop: sl, Appender: sl.appendable.Appender(sl.appenderCtx)} +} + // scrapeAndReport performs a scrape and then appends the result to the storage // together with reporting metrics, by using as few appenders as possible. // In the happy scenario, a single appender is used. @@ -1428,10 +1326,10 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er var total, added, seriesAdded, bytesRead int var err, appErr, scrapeErr error - app := sl.appender(sl.appenderCtx) + app := sl.appender() defer func() { if err != nil { - app.Rollback() + _ = app.Rollback() return } err = app.Commit() @@ -1449,9 +1347,9 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er if forcedErr := sl.getForcedError(); forcedErr != nil { scrapeErr = forcedErr // Add stale markers. - if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { - app.Rollback() - app = sl.appender(sl.appenderCtx) + if _, _, _, err := app.append([]byte{}, "", appendTime); err != nil { + _ = app.Rollback() + app = sl.appender() sl.l.Warn("Append failed", "err", err) } if errc != nil { @@ -1507,16 +1405,16 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er // A failed scrape is the same as an empty scrape, // we still call sl.append to trigger stale markers. - total, added, seriesAdded, appErr = sl.append(app, b, contentType, appendTime) + total, added, seriesAdded, appErr = app.append(b, contentType, appendTime) if appErr != nil { - app.Rollback() - app = sl.appender(sl.appenderCtx) + _ = app.Rollback() + app = sl.appender() sl.l.Debug("Append failed", "err", appErr) // The append failed, probably due to a parse error or sample limit. // Call sl.append again with an empty scrape to trigger stale markers. - if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { - app.Rollback() - app = sl.appender(sl.appenderCtx) + if _, _, _, err := app.append([]byte{}, "", appendTime); err != nil { + _ = app.Rollback() + app = sl.appender() sl.l.Warn("Append failed", "err", err) } } @@ -1586,11 +1484,11 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int // If the target has since been recreated and scraped, the // stale markers will be out of order and ignored. // sl.context would have been cancelled, hence using sl.appenderCtx. 
- app := sl.appender(sl.appenderCtx) + app := sl.appender() var err error defer func() { if err != nil { - app.Rollback() + _ = app.Rollback() return } err = app.Commit() @@ -1598,9 +1496,9 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int sl.l.Warn("Stale commit failed", "err", err) } }() - if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil { - app.Rollback() - app = sl.appender(sl.appenderCtx) + if _, _, _, err = app.append([]byte{}, "", staleTime); err != nil { + _ = app.Rollback() + app = sl.appender() sl.l.Warn("Stale append failed", "err", err) } if err = sl.reportStale(app, staleTime); err != nil { @@ -1634,7 +1532,7 @@ type appendErrors struct { func (sl *scrapeLoop) updateStaleMarkers(app storage.Appender, defTime int64) (err error) { sl.cache.forEachStale(func(ref storage.SeriesRef, lset labels.Labels) bool { // Series no longer exposed, mark it stale. - app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true}) + app.SetOptions(&aOptionRejectEarlyOOO) _, err = app.Append(ref, lset, defTime, math.Float64frombits(value.StaleNaN)) app.SetOptions(nil) switch { @@ -1648,12 +1546,20 @@ func (sl *scrapeLoop) updateStaleMarkers(app storage.Appender, defTime int64) (e return err } -func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { +type scrapeLoopAppender struct { + *scrapeLoop + + storage.Appender +} + +var _ scrapeLoopAppendAdapter = &scrapeLoopAppender{} + +func (sl *scrapeLoopAppender) append(b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { defTime := timestamp.FromTime(ts) if len(b) == 0 { // Empty scrape. Just update the stale makers and swap the cache (but don't flush it). - err = sl.updateStaleMarkers(app, defTime) + err = sl.updateStaleMarkers(sl.Appender, defTime) sl.cache.iterDone(false) return total, added, seriesAdded, err } @@ -1696,7 +1602,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, exemplars := make([]exemplar.Exemplar, 0, 1) // Take an appender with limits. - app = appender(app, sl.sampleLimit, sl.bucketLimit, sl.maxSchema) + app := appenderWithLimits(sl.Appender, sl.sampleLimit, sl.bucketLimit, sl.maxSchema) defer func() { if err != nil { @@ -1785,7 +1691,7 @@ loop: continue } - if !lset.Has(labels.MetricName) { + if !lset.Has(model.MetricNameLabel) { err = errNameLabelMandatory break loop } @@ -1859,7 +1765,7 @@ loop: // But make sure we only do this if we have a cache entry (ce) for our series. sl.cache.trackStaleness(ref, ce) } - if sampleAdded && sampleLimitErr == nil && bucketLimitErr == nil { + if sampleLimitErr == nil && bucketLimitErr == nil { seriesAdded++ } } @@ -1917,7 +1823,7 @@ loop: // In majority cases we can trust that the current series/histogram is matching the lastMeta and lastMFName. // However, optional TYPE etc metadata and broken OM text can break this, detect those cases here. // TODO(bwplotka): Consider moving this to parser as many parser users end up doing this (e.g. ST and NHCB parsing). - if isSeriesPartOfFamily(lset.Get(labels.MetricName), lastMFName, lastMeta.Type) { + if isSeriesPartOfFamily(lset.Get(model.MetricNameLabel), lastMFName, lastMeta.Type) { if _, merr := app.UpdateMetadata(ref, lset, lastMeta.Metadata); merr != nil { // No need to fail the scrape on errors appending metadata. 
sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", lastMeta.Metadata), "err", merr) @@ -2029,7 +1935,7 @@ func isSeriesPartOfFamily(mName string, mfName []byte, typ model.MetricType) boo // during normal operation (e.g., accidental cardinality explosion, sudden traffic spikes). // Current case ordering prevents exercising other cases when limits are exceeded. // Remaining error cases typically occur only a few times, often during initial setup. -func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) { +func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (sampleAdded bool, _ error) { switch { case err == nil: return true, nil @@ -2141,7 +2047,7 @@ var ( } ) -func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) { +func (sl *scrapeLoop) report(app scrapeLoopAppendAdapter, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) { sl.scraper.Report(start, duration, scrapeErr) ts := timestamp.FromTime(start) @@ -2152,71 +2058,70 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration tim } b := labels.NewBuilderWithSymbolTable(sl.symbolTable) - if err = sl.addReportSample(app, scrapeHealthMetric, ts, health, b); err != nil { + if err = app.addReportSample(scrapeHealthMetric, ts, health, b, false); err != nil { return err } - if err = sl.addReportSample(app, scrapeDurationMetric, ts, duration.Seconds(), b); err != nil { + if err = app.addReportSample(scrapeDurationMetric, ts, duration.Seconds(), b, false); err != nil { return err } - if err = sl.addReportSample(app, scrapeSamplesMetric, ts, float64(scraped), b); err != nil { + if err = app.addReportSample(scrapeSamplesMetric, ts, float64(scraped), b, false); err != nil { return err } - if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, float64(added), b); err != nil { + if err = app.addReportSample(samplesPostRelabelMetric, ts, float64(added), b, false); err != nil { return err } - if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, float64(seriesAdded), b); err != nil { + if err = app.addReportSample(scrapeSeriesAddedMetric, ts, float64(seriesAdded), b, false); err != nil { return err } if sl.reportExtraMetrics { - if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, sl.timeout.Seconds(), b); err != nil { + if err = app.addReportSample(scrapeTimeoutMetric, ts, sl.timeout.Seconds(), b, false); err != nil { return err } - if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, float64(sl.sampleLimit), b); err != nil { + if err = app.addReportSample(scrapeSampleLimitMetric, ts, float64(sl.sampleLimit), b, false); err != nil { return err } - if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, float64(bytes), b); err != nil { + if err = app.addReportSample(scrapeBodySizeBytesMetric, ts, float64(bytes), b, false); err != nil { return err } } return err } -func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err error) { +func (sl *scrapeLoop) reportStale(app scrapeLoopAppendAdapter, start time.Time) (err error) { ts := timestamp.FromTime(start) - app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true}) stale := math.Float64frombits(value.StaleNaN) b := 
labels.NewBuilder(labels.EmptyLabels()) - if err = sl.addReportSample(app, scrapeHealthMetric, ts, stale, b); err != nil { + if err = app.addReportSample(scrapeHealthMetric, ts, stale, b, true); err != nil { return err } - if err = sl.addReportSample(app, scrapeDurationMetric, ts, stale, b); err != nil { + if err = app.addReportSample(scrapeDurationMetric, ts, stale, b, true); err != nil { return err } - if err = sl.addReportSample(app, scrapeSamplesMetric, ts, stale, b); err != nil { + if err = app.addReportSample(scrapeSamplesMetric, ts, stale, b, true); err != nil { return err } - if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, stale, b); err != nil { + if err = app.addReportSample(samplesPostRelabelMetric, ts, stale, b, true); err != nil { return err } - if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, stale, b); err != nil { + if err = app.addReportSample(scrapeSeriesAddedMetric, ts, stale, b, true); err != nil { return err } if sl.reportExtraMetrics { - if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, stale, b); err != nil { + if err = app.addReportSample(scrapeTimeoutMetric, ts, stale, b, true); err != nil { return err } - if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, stale, b); err != nil { + if err = app.addReportSample(scrapeSampleLimitMetric, ts, stale, b, true); err != nil { return err } - if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, stale, b); err != nil { + if err = app.addReportSample(scrapeBodySizeBytesMetric, ts, stale, b, true); err != nil { return err } } return err } -func (sl *scrapeLoop) addReportSample(app storage.Appender, s reportSample, t int64, v float64, b *labels.Builder) error { +func (sl *scrapeLoopAppender) addReportSample(s reportSample, t int64, v float64, b *labels.Builder, rejectOOO bool) (err error) { ce, ok, _ := sl.cache.get(s.name) var ref storage.SeriesRef var lset labels.Labels @@ -2228,18 +2133,26 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s reportSample, t in // with scraped metrics in the cache. // We have to drop it when building the actual metric. b.Reset(labels.EmptyLabels()) - b.Set(labels.MetricName, string(s.name[:len(s.name)-1])) + b.Set(model.MetricNameLabel, string(s.name[:len(s.name)-1])) lset = sl.reportSampleMutator(b.Labels()) } - ref, err := app.Append(ref, lset, t, v) + // This will be improved in AppenderV2. + if rejectOOO { + sl.SetOptions(&aOptionRejectEarlyOOO) + ref, err = sl.Append(ref, lset, t, v) + sl.SetOptions(nil) + } else { + ref, err = sl.Append(ref, lset, t, v) + } + switch { case err == nil: if !ok { sl.cache.addRef(s.name, ref, lset, lset.Hash()) // We only need to add metadata once a scrape target appears. if sl.appendMetadataToWAL { - if _, merr := app.UpdateMetadata(ref, lset, s.Metadata); merr != nil { + if _, merr := sl.UpdateMetadata(ref, lset, s.Metadata); merr != nil { sl.l.Debug("Error when appending metadata in addReportSample", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", s.Metadata), "err", merr) } } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index eab1499158..7aa633d387 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1,4 +1,4 @@ -// Copyright 2016 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -89,7 +89,7 @@ func newTestScrapeMetrics(t testing.TB) *scrapeMetrics { func TestNewScrapePool(t *testing.T) { var ( - app = &nopAppendable{} + app = teststorage.NewAppendable() cfg = &config.ScrapeConfig{ MetricNameValidationScheme: model.UTF8Validation, MetricNameEscapingScheme: model.AllowUTF8, @@ -98,20 +98,17 @@ func TestNewScrapePool(t *testing.T) { ) require.NoError(t, err) - a, ok := sp.appendable.(*nopAppendable) + a, ok := sp.appendable.(*teststorage.Appendable) require.True(t, ok, "Failure to append.") require.Equal(t, app, a, "Wrong sample appender.") require.Equal(t, cfg, sp.config, "Wrong scrape config.") - require.NotNil(t, sp.newLoop, "newLoop function not initialized.") } func TestStorageHandlesOutOfOrderTimestamps(t *testing.T) { // Test with default OutOfOrderTimeWindow (0) t.Run("Out-Of-Order Sample Disabled", func(t *testing.T) { s := teststorage.New(t) - t.Cleanup(func() { - _ = s.Close() - }) + t.Cleanup(func() { _ = s.Close() }) runScrapeLoopTest(t, s, false) }) @@ -119,19 +116,14 @@ func TestStorageHandlesOutOfOrderTimestamps(t *testing.T) { // Test with specific OutOfOrderTimeWindow (600000) t.Run("Out-Of-Order Sample Enabled", func(t *testing.T) { s := teststorage.New(t, 600000) - t.Cleanup(func() { - _ = s.Close() - }) + t.Cleanup(func() { _ = s.Close() }) runScrapeLoopTest(t, s, true) }) } func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrder bool) { - // Create an appender for adding samples to the storage. - app := s.Appender(context.Background()) - capp := &collectResultAppender{next: app} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return capp }, 0) + sl, _ := newTestScrapeLoop(t, withAppendable(s)) // Current time for generating timestamps. now := time.Now() @@ -142,37 +134,35 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde timestampOutOfOrder := now.Add(-5 * time.Minute) timestampInorder2 := now.Add(5 * time.Minute) - slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(`metric_total{a="1",b="1"} 1`), "text/plain", timestampInorder1) + app := sl.appender() + _, _, _, err := app.append([]byte(`metric_total{a="1",b="1"} 1`), "text/plain", timestampInorder1) require.NoError(t, err) - _, _, _, err = sl.append(slApp, []byte(`metric_total{a="1",b="1"} 2`), "text/plain", timestampOutOfOrder) + _, _, _, err = app.append([]byte(`metric_total{a="1",b="1"} 2`), "text/plain", timestampOutOfOrder) require.NoError(t, err) - _, _, _, err = sl.append(slApp, []byte(`metric_total{a="1",b="1"} 3`), "text/plain", timestampInorder2) + _, _, _, err = app.append([]byte(`metric_total{a="1",b="1"} 3`), "text/plain", timestampInorder2) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) // Query the samples back from the storage. - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) require.NoError(t, err) - defer q.Close() + t.Cleanup(func() { _ = q.Close() }) // Use a matcher to filter the metric name. 
- series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_total")) + series := q.Select(t.Context(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_total")) - var results []floatSample + var results []sample for series.Next() { it := series.At().Iterator(nil) for it.Next() == chunkenc.ValFloat { t, v := it.At() - results = append(results, floatSample{ - metric: series.At().Labels(), - t: t, - f: v, + results = append(results, sample{ + L: series.At().Labels(), + T: t, + V: v, }) } require.NoError(t, it.Err()) @@ -180,16 +170,16 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde require.NoError(t, series.Err()) // Define the expected results - want := []floatSample{ + want := []sample{ { - metric: labels.FromStrings("__name__", "metric_total", "a", "1", "b", "1"), - t: timestamp.FromTime(timestampInorder1), - f: 1, + L: labels.FromStrings("__name__", "metric_total", "a", "1", "b", "1"), + T: timestamp.FromTime(timestampInorder1), + V: 1, }, { - metric: labels.FromStrings("__name__", "metric_total", "a", "1", "b", "1"), - t: timestamp.FromTime(timestampInorder2), - f: 3, + L: labels.FromStrings("__name__", "metric_total", "a", "1", "b", "1"), + T: timestamp.FromTime(timestampInorder2), + V: 3, }, } @@ -201,7 +191,7 @@ } // Regression test against https://github.com/prometheus/prometheus/issues/15831. -func TestScrapeAppendMetadataUpdate(t *testing.T) { +func TestScrapeAppend_MetadataUpdate(t *testing.T) { const ( scrape1 = `# TYPE test_metric counter # HELP test_metric some help text @@ -224,60 +214,54 @@ test_metric2{foo="bar"} 22 # EOF` ) - // Create an appender for adding samples to the storage. - capp := &collectResultAppender{next: nopAppender{}} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return capp }, 0) + appTest := teststorage.NewAppendable() + sl, _ := newTestScrapeLoop(t, withAppendable(appTest)) now := time.Now() - slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(scrape1), "application/openmetrics-text", now) + app := sl.appender() + _, _, _, err := app.append([]byte(scrape1), "application/openmetrics-text", now) require.NoError(t, err) - require.NoError(t, slApp.Commit()) - testutil.RequireEqualWithOptions(t, []metadataEntry{ - {metric: labels.FromStrings("__name__", "test_metric_total"), m: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}}, - {metric: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), m: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}}, - }, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)}) - capp.resultMetadata = nil - - // Next (the same) scrape should not add new metadata entries. - slApp = sl.appender(context.Background()) - _, _, _, err = sl.append(slApp, []byte(scrape1), "application/openmetrics-text", now.Add(15*time.Second)) + require.NoError(t, app.Commit()) + testutil.RequireEqual(t, []sample{ + {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}}, + {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}}, + }, appTest.ResultMetadata()) + appTest.ResultReset() + + // Next (the same) scrape should not add new metadata entries.
+ app = sl.appender() + _, _, _, err = app.append([]byte(scrape1), "application/openmetrics-text", now.Add(15*time.Second)) require.NoError(t, err) - require.NoError(t, slApp.Commit()) - testutil.RequireEqualWithOptions(t, []metadataEntry(nil), capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)}) + require.NoError(t, app.Commit()) + require.Empty(t, appTest.ResultMetadata()) + appTest.ResultReset() - slApp = sl.appender(context.Background()) - _, _, _, err = sl.append(slApp, []byte(scrape2), "application/openmetrics-text", now.Add(15*time.Second)) + app = sl.appender() + _, _, _, err = app.append([]byte(scrape2), "application/openmetrics-text", now.Add(15*time.Second)) require.NoError(t, err) - require.NoError(t, slApp.Commit()) - testutil.RequireEqualWithOptions(t, []metadataEntry{ - {metric: labels.FromStrings("__name__", "test_metric_total"), m: metadata.Metadata{Type: "counter", Unit: "metric", Help: "different help text"}}, // Here, technically we should have no unit, but it's a known limitation of the current implementation. - {metric: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), m: metadata.Metadata{Type: "gauge", Unit: "metric2", Help: "other help text"}}, - }, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)}) + require.NoError(t, app.Commit()) + testutil.RequireEqual(t, []sample{ + {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "different help text"}}, // Here, technically we should have no unit, but it's a known limitation of the current implementation. + {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "metric2", Help: "other help text"}}, + }, appTest.ResultMetadata()) + appTest.ResultReset() } -type nopScraper struct { - scraper -} - -func (nopScraper) Report(time.Time, time.Duration, error) {} +func TestScrapeReportMetadata(t *testing.T) { + appTest := teststorage.NewAppendable() + sl, _ := newTestScrapeLoop(t, withAppendable(appTest)) + app := sl.appender() -func TestScrapeReportMetadataUpdate(t *testing.T) { - // Create an appender for adding samples to the storage. 
- capp := &collectResultAppender{next: nopAppender{}} - sl := newBasicScrapeLoop(t, context.Background(), nopScraper{}, func(context.Context) storage.Appender { return capp }, 0) now := time.Now() - slApp := sl.appender(context.Background()) - - require.NoError(t, sl.report(slApp, now, 2*time.Second, 1, 1, 1, 512, nil)) - require.NoError(t, slApp.Commit()) - testutil.RequireEqualWithOptions(t, []metadataEntry{ - {metric: labels.FromStrings("__name__", "up"), m: scrapeHealthMetric.Metadata}, - {metric: labels.FromStrings("__name__", "scrape_duration_seconds"), m: scrapeDurationMetric.Metadata}, - {metric: labels.FromStrings("__name__", "scrape_samples_scraped"), m: scrapeSamplesMetric.Metadata}, - {metric: labels.FromStrings("__name__", "scrape_samples_post_metric_relabeling"), m: samplesPostRelabelMetric.Metadata}, - {metric: labels.FromStrings("__name__", "scrape_series_added"), m: scrapeSeriesAddedMetric.Metadata}, - }, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)}) + require.NoError(t, sl.report(app, now, 2*time.Second, 1, 1, 1, 512, nil)) + require.NoError(t, app.Commit()) + testutil.RequireEqual(t, []sample{ + {L: labels.FromStrings("__name__", "up"), M: scrapeHealthMetric.Metadata}, + {L: labels.FromStrings("__name__", "scrape_duration_seconds"), M: scrapeDurationMetric.Metadata}, + {L: labels.FromStrings("__name__", "scrape_samples_scraped"), M: scrapeSamplesMetric.Metadata}, + {L: labels.FromStrings("__name__", "scrape_samples_post_metric_relabeling"), M: samplesPostRelabelMetric.Metadata}, + {L: labels.FromStrings("__name__", "scrape_series_added"), M: scrapeSeriesAddedMetric.Metadata}, + }, appTest.ResultMetadata()) } func TestIsSeriesPartOfFamily(t *testing.T) { @@ -330,7 +314,7 @@ func TestIsSeriesPartOfFamily(t *testing.T) { func TestDroppedTargetsList(t *testing.T) { var ( - app = &nopAppendable{} + app = teststorage.NewAppendable() cfg = &config.ScrapeConfig{ JobName: "dropMe", ScrapeInterval: model.Duration(1), @@ -374,9 +358,7 @@ func TestDroppedTargetsList(t *testing.T) { // TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated // even when new labels don't affect the target `hash`. func TestDiscoveredLabelsUpdate(t *testing.T) { - sp := &scrapePool{ - metrics: newTestScrapeMetrics(t), - } + sp := newTestScrapePool(t, nil) // These are used when syncing so need this to avoid a panic. sp.config = &config.ScrapeConfig{ @@ -448,13 +430,8 @@ func (*testLoop) getCache() *scrapeCache { func TestScrapePoolStop(t *testing.T) { t.Parallel() - sp := &scrapePool{ - activeTargets: map[uint64]*Target{}, - loops: map[uint64]loop{}, - cancel: func() {}, - client: http.DefaultClient, - metrics: newTestScrapeMetrics(t), - } + sp := newTestScrapePool(t, nil) + var mtx sync.Mutex stopped := map[uint64]bool{} numTargets := 20 @@ -506,26 +483,42 @@ func TestScrapePoolStop(t *testing.T) { require.Empty(t, sp.loops, "Loops were not cleared on stopping: %d left", len(sp.loops)) } +// TestScrapePoolReload tests reloading logic, so: +// * all loops are reloaded, reusing cache if scrape config changed. +// * reloaded loops are stopped before new ones are started. +// * new scrapeLoops are configured with the updated scrape config. 
func TestScrapePoolReload(t *testing.T) { t.Parallel() - var mtx sync.Mutex - numTargets := 20 - stopped := map[uint64]bool{} + var ( + mtx sync.Mutex + numTargets = 20 + stopped = map[uint64]bool{} + ) - reloadCfg := &config.ScrapeConfig{ + cfg0 := &config.ScrapeConfig{} + cfg1 := &config.ScrapeConfig{ ScrapeInterval: model.Duration(3 * time.Second), ScrapeTimeout: model.Duration(2 * time.Second), MetricNameValidationScheme: model.UTF8Validation, MetricNameEscapingScheme: model.AllowUTF8, + + // Test a few example options. + SampleLimit: 123, + ScrapeFallbackProtocol: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", } - // On starting to run, new loops created on reload check whether their preceding - // equivalents have been stopped. - newLoop := func(opts scrapeLoopOptions) loop { - l := &testLoop{interval: time.Duration(reloadCfg.ScrapeInterval), timeout: time.Duration(reloadCfg.ScrapeTimeout)} + newLoopCfg1 := func(opts scrapeLoopOptions) loop { + // Verify cfg1 is being used. + require.Equal(t, cfg1, opts.sp.config) + + // Inject our testLoop, which allows mocking start and stop. + l := &testLoop{interval: opts.interval, timeout: opts.timeout} + + // On start, expect previous loop instances for the same target to be stopped. l.startFunc = func(interval, timeout time.Duration, _ chan<- error) { - require.Equal(t, 3*time.Second, interval, "Unexpected scrape interval") - require.Equal(t, 2*time.Second, timeout, "Unexpected scrape timeout") + // Ensure cfg1 interval and timeout are correctly configured. + require.Equal(t, time.Duration(cfg1.ScrapeInterval), interval, "Unexpected scrape interval") + require.Equal(t, time.Duration(cfg1.ScrapeTimeout), timeout, "Unexpected scrape timeout") mtx.Lock() targetScraper := opts.scraper.(*targetScraper) @@ -535,32 +528,21 @@ func TestScrapePoolReload(t *testing.T) { return l } + // Create test pool. reg, metrics := newTestRegistryAndScrapeMetrics(t) - sp := &scrapePool{ - appendable: &nopAppendable{}, - activeTargets: map[uint64]*Target{}, - loops: map[uint64]loop{}, - newLoop: newLoop, - logger: nil, - client: http.DefaultClient, - metrics: metrics, - symbolTable: labels.NewSymbolTable(), - } - - // Reloading a scrape pool with a new scrape configuration must stop all scrape - // loops and start new ones. A new loop must not be started before the preceding - // one terminated. + sp := newTestScrapePool(t, newLoopCfg1) + sp.metrics = metrics + // Prefill pool with 20 loops, simulating 20 scrape targets. for i := range numTargets { - labels := labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)) t := &Target{ - labels: labels, - scrapeConfig: &config.ScrapeConfig{}, + labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)), + scrapeConfig: cfg0, } l := &testLoop{} d := time.Duration((i+1)*20) * time.Millisecond l.stopFunc = func() { - time.Sleep(d) + time.Sleep(d) // Sleep for an uneven time on stop. mtx.Lock() stopped[t.hash()] = true @@ -570,36 +552,26 @@ func TestScrapePoolReload(t *testing.T) { sp.activeTargets[t.hash()] = t sp.loops[t.hash()] = l } - done := make(chan struct{}) beforeTargets := map[uint64]*Target{} maps.Copy(beforeTargets, sp.activeTargets) - reloadTime := time.Now() - - go func() { - sp.reload(reloadCfg) - close(done) - }() - - select { - case <-time.After(5 * time.Second): - require.FailNow(t, "scrapeLoop.reload() did not return as expected") - case <-done: - // This should have taken at least as long as the last target slept.
- require.GreaterOrEqual(t, time.Since(reloadTime), time.Duration(numTargets*20)*time.Millisecond, "scrapeLoop.stop() exited before all targets stopped") - } - + // Reloading a scrape pool with a new scrape configuration must stop all scrape + // loops and start new ones. A new loop must not be started before the preceding + // one terminated. + require.NoError(t, sp.reload(cfg1)) + var stoppedCount int mtx.Lock() - require.Len(t, stopped, numTargets, "Unexpected number of stopped loops") + stoppedCount = len(stopped) mtx.Unlock() - + require.Equal(t, numTargets, stoppedCount, "Unexpected number of stopped loops") require.Equal(t, sp.activeTargets, beforeTargets, "Reloading affected target states unexpectedly") - require.Len(t, sp.loops, numTargets, "Unexpected number of stopped loops after reload") + require.Len(t, sp.loops, numTargets, "Unexpected number of loops after reload") + // Check if prometheus_target_reload_length_seconds points to cfg1.ScrapeInterval. got, err := gatherLabels(reg, "prometheus_target_reload_length_seconds") require.NoError(t, err) - expectedName, expectedValue := "interval", "3s" + expectedName, expectedValue := "interval", cfg1.ScrapeInterval.String() require.Equal(t, [][]*dto.LabelPair{{{Name: &expectedName, Value: &expectedValue}}}, got) require.Equal(t, 1.0, prom_testutil.ToFloat64(sp.metrics.targetScrapePoolReloads)) } @@ -620,22 +592,12 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) { return l } reg, metrics := newTestRegistryAndScrapeMetrics(t) - sp := &scrapePool{ - appendable: &nopAppendable{}, - activeTargets: map[uint64]*Target{ - 1: { - labels: labels.FromStrings(model.ScrapeIntervalLabel, "5s", model.ScrapeTimeoutLabel, "3s"), - }, - }, - loops: map[uint64]loop{ - 1: noopLoop(), - }, - newLoop: newLoop, - logger: nil, - client: http.DefaultClient, - metrics: metrics, - symbolTable: labels.NewSymbolTable(), + sp := newTestScrapePool(t, newLoop) + sp.activeTargets[1] = &Target{ + labels: labels.FromStrings(model.ScrapeIntervalLabel, "5s", model.ScrapeTimeoutLabel, "3s"), } + sp.metrics = metrics + sp.loops[1] = noopLoop() err := sp.reload(reloadCfg) if err != nil { @@ -681,18 +643,10 @@ func TestScrapePoolTargetLimit(t *testing.T) { } return l } - sp := &scrapePool{ - appendable: &nopAppendable{}, - activeTargets: map[uint64]*Target{}, - loops: map[uint64]loop{}, - newLoop: newLoop, - logger: promslog.NewNopLogger(), - client: http.DefaultClient, - metrics: newTestScrapeMetrics(t), - symbolTable: labels.NewSymbolTable(), - } - tgs := []*targetgroup.Group{} + sp := newTestScrapePool(t, newLoop) + + var tgs []*targetgroup.Group for i := range 50 { tgs = append(tgs, &targetgroup.Group{ @@ -782,12 +736,12 @@ func TestScrapePoolTargetLimit(t *testing.T) { tgs = append(tgs, &targetgroup.Group{ Targets: []model.LabelSet{ - {model.AddressLabel: model.LabelValue("127.0.0.1:1090")}, + {model.AddressLabel: "127.0.0.1:1090"}, }, }, &targetgroup.Group{ Targets: []model.LabelSet{ - {model.AddressLabel: model.LabelValue("127.0.0.1:1090")}, + {model.AddressLabel: "127.0.0.1:1090"}, }, }, ) @@ -797,62 +751,48 @@ func TestScrapePoolTargetLimit(t *testing.T) { validateErrorMessage(false) } -func TestScrapePoolAppender(t *testing.T) { - cfg := &config.ScrapeConfig{ - MetricNameValidationScheme: model.UTF8Validation, - MetricNameEscapingScheme: model.AllowUTF8, - } - app := &nopAppendable{} - sp, _ := newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) - - loop := sp.newLoop(scrapeLoopOptions{ - target: &Target{}, - }) - 
appl, ok := loop.(*scrapeLoop) - require.True(t, ok, "Expected scrapeLoop but got %T", loop) +func TestScrapePoolAppenderWithLimits(t *testing.T) { + // Create a unique value, to validate the correct chain of appenders. + baseAppender := struct{ storage.Appender }{} + appendable := appendableFunc(func(context.Context) storage.Appender { return baseAppender }) - wrapped := appender(appl.appender(context.Background()), 0, 0, histogram.ExponentialSchemaMax) + sl, _ := newTestScrapeLoop(t, withAppendable(appendable)) + wrapped := appenderWithLimits(sl.appendable.Appender(context.Background()), 0, 0, histogram.ExponentialSchemaMax) tl, ok := wrapped.(*timeLimitAppender) require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped) - _, ok = tl.Appender.(nopAppender) - require.True(t, ok, "Expected base appender but got %T", tl.Appender) + require.Equal(t, baseAppender, tl.Appender, "Expected base appender but got %T", tl.Appender) sampleLimit := 100 - loop = sp.newLoop(scrapeLoopOptions{ - target: &Target{}, - sampleLimit: sampleLimit, + sl, _ = newTestScrapeLoop(t, func(sl *scrapeLoop) { + sl.appendable = appendable + sl.sampleLimit = sampleLimit }) - appl, ok = loop.(*scrapeLoop) - require.True(t, ok, "Expected scrapeLoop but got %T", loop) + wrapped = appenderWithLimits(sl.appendable.Appender(context.Background()), sampleLimit, 0, histogram.ExponentialSchemaMax) - wrapped = appender(appl.appender(context.Background()), sampleLimit, 0, histogram.ExponentialSchemaMax) - - sl, ok := wrapped.(*limitAppender) + la, ok := wrapped.(*limitAppender) require.True(t, ok, "Expected limitAppender but got %T", wrapped) - tl, ok = sl.Appender.(*timeLimitAppender) - require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender) + tl, ok = la.Appender.(*timeLimitAppender) + require.True(t, ok, "Expected timeLimitAppender but got %T", la.Appender) - _, ok = tl.Appender.(nopAppender) - require.True(t, ok, "Expected base appender but got %T", tl.Appender) + require.Equal(t, baseAppender, tl.Appender, "Expected base appender but got %T", tl.Appender) - wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, histogram.ExponentialSchemaMax) + wrapped = appenderWithLimits(sl.appendable.Appender(context.Background()), sampleLimit, 100, histogram.ExponentialSchemaMax) bl, ok := wrapped.(*bucketLimitAppender) require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped) - sl, ok = bl.Appender.(*limitAppender) + la, ok = bl.Appender.(*limitAppender) require.True(t, ok, "Expected limitAppender but got %T", bl) - tl, ok = sl.Appender.(*timeLimitAppender) - require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender) + tl, ok = la.Appender.(*timeLimitAppender) + require.True(t, ok, "Expected timeLimitAppender but got %T", la.Appender) - _, ok = tl.Appender.(nopAppender) - require.True(t, ok, "Expected base appender but got %T", tl.Appender) + require.Equal(t, baseAppender, tl.Appender, "Expected base appender but got %T", tl.Appender) - wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, 0) + wrapped = appenderWithLimits(sl.appendable.Appender(context.Background()), sampleLimit, 100, 0) ml, ok := wrapped.(*maxSchemaAppender) require.True(t, ok, "Expected maxSchemaAppender but got %T", wrapped) @@ -860,14 +800,13 @@ func TestScrapePoolAppender(t *testing.T) { bl, ok = ml.Appender.(*bucketLimitAppender) require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped) - sl, ok = bl.Appender.(*limitAppender) + la, ok = 
bl.Appender.(*limitAppender) require.True(t, ok, "Expected limitAppender but got %T", bl) - tl, ok = sl.Appender.(*timeLimitAppender) - require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender) + tl, ok = la.Appender.(*timeLimitAppender) + require.True(t, ok, "Expected timeLimitAppender but got %T", la.Appender) - _, ok = tl.Appender.(nopAppender) - require.True(t, ok, "Expected base appender but got %T", tl.Appender) + require.Equal(t, baseAppender, tl.Appender, "Expected base appender but got %T", tl.Appender) } func TestScrapePoolRaces(t *testing.T) { @@ -882,7 +821,7 @@ func TestScrapePoolRaces(t *testing.T) { MetricNameEscapingScheme: model.AllowUTF8, } } - sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) + sp, _ := newScrapePool(newConfig(), teststorage.NewAppendable(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) tgts := []*targetgroup.Group{ { Targets: []model.LabelSet{ @@ -908,7 +847,7 @@ func TestScrapePoolRaces(t *testing.T) { for range 20 { time.Sleep(10 * time.Millisecond) - sp.reload(newConfig()) + _ = sp.reload(newConfig()) } sp.stop() } @@ -925,16 +864,7 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) { } return l } - sp := &scrapePool{ - appendable: &nopAppendable{}, - activeTargets: map[uint64]*Target{}, - loops: map[uint64]loop{}, - newLoop: newLoop, - logger: nil, - client: http.DefaultClient, - metrics: newTestScrapeMetrics(t), - symbolTable: labels.NewSymbolTable(), - } + sp := newTestScrapePool(t, newLoop) tgs := []*targetgroup.Group{ { @@ -965,51 +895,13 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) { } } -func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app func(ctx context.Context) storage.Appender, interval time.Duration) *scrapeLoop { - return newBasicScrapeLoopWithFallback(t, ctx, scraper, app, interval, "") -} - -func newBasicScrapeLoopWithFallback(t testing.TB, ctx context.Context, scraper scraper, app func(ctx context.Context) storage.Appender, interval time.Duration, fallback string) *scrapeLoop { - return newScrapeLoop(ctx, - scraper, - nil, nil, - nopMutator, - nopMutator, - app, - nil, - labels.NewSymbolTable(), - 0, - true, - false, - true, - 0, 0, histogram.ExponentialSchemaMax, - nil, - interval, - time.Hour, - false, - false, - false, - false, - false, - false, - true, - nil, - false, - newTestScrapeMetrics(t), - false, - model.UTF8Validation, - model.NoEscaping, - fallback, - ) -} - func TestScrapeLoopStopBeforeRun(t *testing.T) { t.Parallel() - scraper := &testScraper{} - sl := newBasicScrapeLoop(t, context.Background(), scraper, nil, 1) + + sl, scraper := newTestScrapeLoop(t) // The scrape pool synchronizes on stopping scrape loops. However, new scrape - // loops are started asynchronously. Thus it's possible, that a loop is stopped + // loops are started asynchronously. Thus, it's possible, that a loop is stopped // again before having started properly. // Stopping not-yet-started loops must block until the run method was called and exited. // The run method must exit immediately. 
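TestScrapePoolAppenderWithLimits above unwinds the wrapper chain assembled by appenderWithLimits (maxSchemaAppender over bucketLimitAppender over limitAppender over timeLimitAppender over the base appender). A minimal, self-contained sketch of the underlying decorator idea, with toy types that are not the real storage.Appender:

// Illustrative only: a toy decorator chain in the spirit of
// appenderWithLimits; Appender here is NOT storage.Appender.
package main

import "fmt"

type Appender interface {
	Append(v float64) error
}

type baseAppender struct{}

func (baseAppender) Append(float64) error { return nil }

// limitAppender wraps another Appender and rejects samples once a
// per-scrape count is exceeded, mirroring how each wrapper in the real
// chain enforces one limit and delegates the rest.
type limitAppender struct {
	Appender
	seen, limit int
}

func (l *limitAppender) Append(v float64) error {
	l.seen++
	if l.seen > l.limit {
		return fmt.Errorf("sample limit of %d exceeded", l.limit)
	}
	return l.Appender.Append(v)
}

func main() {
	var app Appender = &limitAppender{Appender: baseAppender{}, limit: 2}
	fmt.Println(app.Append(1), app.Append(2), app.Append(3)) // <nil> <nil> sample limit of 2 exceeded
}

Each wrapper checks one invariant and delegates the call, which is why the test can assert the chain's exact composition with a sequence of type assertions.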
@@ -1054,26 +946,24 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) { func nopMutator(l labels.Labels) labels.Labels { return l } func TestScrapeLoopStop(t *testing.T) { - var ( - signal = make(chan struct{}, 1) - appender = &collectResultAppender{} - scraper = &testScraper{} - app = func(context.Context) storage.Appender { return appender } - ) + signal := make(chan struct{}, 1) - // Since we're writing samples directly below we need to provide a protocol fallback. - sl := newBasicScrapeLoopWithFallback(t, context.Background(), scraper, app, 10*time.Millisecond, "text/plain") + appTest := teststorage.NewAppendable() + sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) { + sl.appendable = appTest + // Since we're writing samples directly below we need to provide a protocol fallback. + sl.fallbackScrapeProtocol = "text/plain" + }) // Terminate loop after 2 scrapes. numScrapes := 0 - scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { numScrapes++ if numScrapes == 2 { go sl.stop() <-sl.ctx.Done() } - w.Write([]byte("metric_a 42\n")) + _, _ = w.Write([]byte("metric_a 42\n")) return ctx.Err() } @@ -1088,23 +978,24 @@ func TestScrapeLoopStop(t *testing.T) { require.FailNow(t, "Scrape wasn't stopped.") } + got := appTest.ResultSamples() // We expected 1 actual sample for each scrape plus 5 for report samples. // At least 2 scrapes were made, plus the final stale markers. - require.GreaterOrEqual(t, len(appender.resultFloats), 6*3, "Expected at least 3 scrapes with 6 samples each.") - require.Zero(t, len(appender.resultFloats)%6, "There is a scrape with missing samples.") + require.GreaterOrEqual(t, len(got), 6*3, "Expected at least 3 scrapes with 6 samples each.") + require.Zero(t, len(got)%6, "There is a scrape with missing samples.") // All samples in a scrape must have the same timestamp. var ts int64 - for i, s := range appender.resultFloats { + for i, s := range got { switch { case i%6 == 0: - ts = s.t - case s.t != ts: + ts = s.T + case s.T != ts: t.Fatalf("Unexpected multiple timestamps within single scrape") } } // All samples from the last scrape must be stale markers. - for _, s := range appender.resultFloats[len(appender.resultFloats)-5:] { - require.True(t, value.IsStaleNaN(s.f), "Appended last sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(s.f)) + for _, s := range got[len(got)-5:] { + require.True(t, value.IsStaleNaN(s.V), "Appended last sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(s.V)) } } @@ -1113,45 +1004,10 @@ func TestScrapeLoopRun(t *testing.T) { var ( signal = make(chan struct{}, 1) errc = make(chan error) - - scraper = &testScraper{} - app = func(context.Context) storage.Appender { return &nopAppender{} } - scrapeMetrics = newTestScrapeMetrics(t) - ) - - ctx, cancel := context.WithCancel(context.Background()) - sl := newScrapeLoop(ctx, - scraper, - nil, nil, - nopMutator, - nopMutator, - app, - nil, - nil, - 0, - true, - false, - true, - 0, 0, histogram.ExponentialSchemaMax, - nil, - time.Second, - time.Hour, - false, - false, - false, - false, - false, - false, - false, - nil, - false, - scrapeMetrics, - false, - model.UTF8Validation, - model.NoEscaping, - "", ) + ctx, cancel := context.WithCancel(t.Context()) + sl, scraper := newTestScrapeLoop(t, withCtx(ctx)) // The loop must terminate during the initial offset if the context // is canceled. 
 	scraper.offsetDur = time.Hour
 
@@ -1173,24 +1029,26 @@ func TestScrapeLoopRun(t *testing.T) {
 		require.FailNow(t, "Unexpected error", "err: %s", err)
 	}
 
+	ctx, cancel = context.WithCancel(t.Context())
+	sl, scraper = newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.ctx = ctx
+		sl.timeout = 100 * time.Millisecond
+	})
 	// The provided timeout must cause cancellation of the context passed down to the
 	// scraper. The scraper has to respect the context.
 	scraper.offsetDur = 0
 
-	block := make(chan struct{})
+	blockCtx, blockCancel := context.WithCancel(t.Context())
 	scraper.scrapeFunc = func(ctx context.Context, _ io.Writer) error {
 		select {
-		case <-block:
+		case <-blockCtx.Done():
+			cancel()
 		case <-ctx.Done():
 			return ctx.Err()
 		}
 		return nil
 	}
 
-	ctx, cancel = context.WithCancel(context.Background())
-	sl = newBasicScrapeLoop(t, ctx, scraper, app, time.Second)
-	sl.timeout = 100 * time.Millisecond
-
 	go func() {
 		sl.run(errc)
 		signal <- struct{}{}
@@ -1206,9 +1064,7 @@ func TestScrapeLoopRun(t *testing.T) {
 	// We already caught the timeout error and are certainly in the loop.
 	// Let the scrapes returns immediately to cause no further timeout errors
 	// and check whether canceling the parent context terminates the loop.
-	close(block)
-	cancel()
-
+	blockCancel()
 	select {
 	case <-signal:
 		// Loop terminated as expected.
@@ -1223,13 +1079,10 @@ func TestScrapeLoopForcedErr(t *testing.T) {
 	var (
 		signal = make(chan struct{}, 1)
 		errc   = make(chan error)
-
-		scraper = &testScraper{}
-		app     = func(context.Context) storage.Appender { return &nopAppender{} }
 	)
 
-	ctx, cancel := context.WithCancel(context.Background())
-	sl := newBasicScrapeLoop(t, ctx, scraper, app, time.Second)
+	ctx, cancel := context.WithCancel(t.Context())
+	sl, scraper := newTestScrapeLoop(t, withCtx(ctx))
 
 	forcedErr := errors.New("forced err")
 	sl.setForcedError(forcedErr)
@@ -1264,15 +1117,12 @@ func TestScrapeLoopRun_ContextCancelTerminatesBlockedSend(t *testing.T) {
 	defer goleak.VerifyNone(t)
 
 	var (
-		signal  = make(chan struct{})
-		errc    = make(chan error)
-		scraper = &testScraper{}
-		app     = func(context.Context) storage.Appender { return &nopAppender{} }
+		signal = make(chan struct{})
+		errc   = make(chan error)
 	)
 
-	ctx, cancel := context.WithCancel(context.Background())
-
-	sl := newBasicScrapeLoop(t, ctx, scraper, app, 100*time.Millisecond)
+	ctx, cancel := context.WithCancel(t.Context())
+	sl, scraper := newTestScrapeLoop(t, withCtx(ctx))
 
 	forcedErr := errors.New("forced err")
 	sl.setForcedError(forcedErr)
@@ -1299,50 +1149,10 @@ func TestScrapeLoopRun_ContextCancelTerminatesBlockedSend(t *testing.T) {
 }
 
 func TestScrapeLoopMetadata(t *testing.T) {
-	var (
-		signal        = make(chan struct{})
-		scraper       = &testScraper{}
-		scrapeMetrics = newTestScrapeMetrics(t)
-		cache         = newScrapeCache(scrapeMetrics)
-	)
-	defer close(signal)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	sl := newScrapeLoop(ctx,
-		scraper,
-		nil, nil,
-		nopMutator,
-		nopMutator,
-		func(context.Context) storage.Appender { return nopAppender{} },
-		cache,
-		labels.NewSymbolTable(),
-		0,
-		true,
-		false,
-		true,
-		0, 0, histogram.ExponentialSchemaMax,
-		nil,
-		0,
-		0,
-		false,
-		false,
-		false,
-		false,
-		false,
-		false,
-		false,
-		nil,
-		false,
-		scrapeMetrics,
-		false,
-		model.UTF8Validation,
-		model.NoEscaping,
-		"",
-	)
-	defer cancel()
+	sl, _ := newTestScrapeLoop(t)
 
-	slApp := sl.appender(ctx)
-	total, _, _, err := sl.append(slApp, []byte(`# TYPE test_metric counter
+	app := sl.appender()
+	total, _, _, err := app.append([]byte(`# TYPE test_metric counter
 # HELP test_metric some help text
 # UNIT test_metric metric
 test_metric_total 1
@@ -1350,54 +1160,42 @@ test_metric_total 1
 # HELP test_metric_no_type other help text
 # EOF`), "application/openmetrics-text", time.Now())
 	require.NoError(t, err)
-	require.NoError(t, slApp.Commit())
+	require.NoError(t, app.Commit())
 	require.Equal(t, 1, total)
 
-	md, ok := cache.GetMetadata("test_metric")
+	md, ok := sl.cache.GetMetadata("test_metric")
 	require.True(t, ok, "expected metadata to be present")
 	require.Equal(t, model.MetricTypeCounter, md.Type, "unexpected metric type")
 	require.Equal(t, "some help text", md.Help)
 	require.Equal(t, "metric", md.Unit)
 
-	md, ok = cache.GetMetadata("test_metric_no_help")
+	md, ok = sl.cache.GetMetadata("test_metric_no_help")
 	require.True(t, ok, "expected metadata to be present")
 	require.Equal(t, model.MetricTypeGauge, md.Type, "unexpected metric type")
 	require.Empty(t, md.Help)
 	require.Empty(t, md.Unit)
 
-	md, ok = cache.GetMetadata("test_metric_no_type")
+	md, ok = sl.cache.GetMetadata("test_metric_no_type")
 	require.True(t, ok, "expected metadata to be present")
 	require.Equal(t, model.MetricTypeUnknown, md.Type, "unexpected metric type")
 	require.Equal(t, "other help text", md.Help)
 	require.Empty(t, md.Unit)
 }
 
-func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
-	// Need a full storage for correct Add/AddFast semantics.
-	s := teststorage.New(t)
-	t.Cleanup(func() { s.Close() })
-
-	ctx, cancel := context.WithCancel(context.Background())
-	sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0)
-	t.Cleanup(func() { cancel() })
-
-	return ctx, sl
-}
-
 func TestScrapeLoopSeriesAdded(t *testing.T) {
-	ctx, sl := simpleTestScrapeLoop(t)
+	sl, _ := newTestScrapeLoop(t)
 
-	slApp := sl.appender(ctx)
-	total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{})
+	app := sl.appender()
+	total, added, seriesAdded, err := app.append([]byte("test_metric 1\n"), "text/plain", time.Time{})
 	require.NoError(t, err)
-	require.NoError(t, slApp.Commit())
+	require.NoError(t, app.Commit())
 	require.Equal(t, 1, total)
 	require.Equal(t, 1, added)
 	require.Equal(t, 1, seriesAdded)
 
-	slApp = sl.appender(ctx)
-	total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{})
-	require.NoError(t, slApp.Commit())
+	app = sl.appender()
+	total, added, seriesAdded, err = app.append([]byte("test_metric 1\n"), "text/plain", time.Time{})
+	require.NoError(t, app.Commit())
 	require.NoError(t, err)
 	require.Equal(t, 1, total)
 	require.Equal(t, 1, added)
@@ -1405,10 +1203,6 @@ func TestScrapeLoopSeriesAdded(t *testing.T) {
 }
 
 func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
-	s := teststorage.New(t)
-	defer s.Close()
-	ctx := t.Context()
-
 	target := &Target{
 		labels: labels.FromStrings("pod_label_invalid_012\xff", "test"),
 	}
@@ -1419,43 +1213,41 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
 		Replacement:          "$1",
 		NameValidationScheme: model.UTF8Validation,
 	}}
-	sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0)
-	sl.sampleMutator = func(l labels.Labels) labels.Labels {
-		return mutateSampleLabels(l, target, true, relabelConfig)
-	}
+	sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.sampleMutator = func(l labels.Labels) labels.Labels {
+			return mutateSampleLabels(l, target, true, relabelConfig)
+		}
+	})
 
-	slApp := sl.appender(ctx)
-	total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{})
+	app := sl.appender()
+	total, added, seriesAdded, err := app.append([]byte("test_metric 1\n"), "text/plain", time.Time{})
 	require.ErrorContains(t, err, "invalid metric name or label names")
-	require.NoError(t, slApp.Rollback())
+	require.NoError(t, app.Rollback())
 	require.Equal(t, 1, total)
 	require.Equal(t, 0, added)
 	require.Equal(t, 0, seriesAdded)
 }
 
 func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) {
-	// Test that scrapes fail when default validation is utf8 but scrape config is
-	// legacy.
-	s := teststorage.New(t)
-	defer s.Close()
-	ctx := t.Context()
-
-	sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0)
-	sl.validationScheme = model.LegacyValidation
+	sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.validationScheme = model.LegacyValidation
+	})
 
-	slApp := sl.appender(ctx)
-	total, added, seriesAdded, err := sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{})
+	app := sl.appender()
+	total, added, seriesAdded, err := app.append([]byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{})
 	require.ErrorContains(t, err, "invalid metric name or label names")
-	require.NoError(t, slApp.Rollback())
+	require.NoError(t, app.Rollback())
 	require.Equal(t, 1, total)
 	require.Equal(t, 0, added)
 	require.Equal(t, 0, seriesAdded)
 
 	// When scrapeloop has validation set to UTF-8, the metric is allowed.
-	sl.validationScheme = model.UTF8Validation
+	sl, _ = newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.validationScheme = model.UTF8Validation
+	})
 
-	slApp = sl.appender(ctx)
-	total, added, seriesAdded, err = sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{})
+	app = sl.appender()
+	total, added, seriesAdded, err = app.append([]byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{})
 	require.NoError(t, err)
 	require.Equal(t, 1, total)
 	require.Equal(t, 1, added)
@@ -1474,12 +1266,12 @@ func readTextParseTestMetrics(t testing.TB) []byte {
 
 func makeTestGauges(n int) []byte {
 	sb := bytes.Buffer{}
-	fmt.Fprintf(&sb, "# TYPE metric_a gauge\n")
-	fmt.Fprintf(&sb, "# HELP metric_a help text\n")
+	sb.WriteString("# TYPE metric_a gauge\n")
+	sb.WriteString("# HELP metric_a help text\n")
 	for i := range n {
-		fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
+		_, _ = fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
 	}
-	fmt.Fprintf(&sb, "# EOF\n")
+	sb.WriteString("# EOF\n")
 	return sb.Bytes()
 }
 
@@ -1550,7 +1342,7 @@ func TestPromTextToProto(t *testing.T) {
 //
 // Recommended CLI invocation:
 /*
-	export bench=append-v1 && go test ./scrape/... \
+	export bench=append && go test ./scrape/... \
 		-run '^$' -bench '^BenchmarkScrapeLoopAppend' \
 		-benchtime 5s -count 6 -cpu 2 -timeout 999m \
 		| tee ${bench}.txt
@@ -1576,16 +1368,19 @@ func BenchmarkScrapeLoopAppend(b *testing.B) {
 		{name: "PromProto", contentType: "application/vnd.google.protobuf", parsable: metricsProto},
 	} {
 		b.Run(fmt.Sprintf("fmt=%v", bcase.name), func(b *testing.B) {
-			ctx, sl := simpleTestScrapeLoop(b)
+			// Need a full storage for correct Add/AddFast semantics.
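+			// (teststorage.New is backed by a real TSDB, so cached series refs exercise the AddFast path; presumably teststorage.NewAppendable alone would not.)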
+			s := teststorage.New(b)
+			b.Cleanup(func() { _ = s.Close() })
 
-			slApp := sl.appender(ctx)
+			sl, _ := newTestScrapeLoop(b, withAppendable(s))
+			app := sl.appender()
 			ts := time.Time{}
 
 			b.ReportAllocs()
 			b.ResetTimer()
 			for b.Loop() {
 				ts = ts.Add(time.Second)
-				_, _, _, err := sl.append(slApp, bcase.parsable, bcase.contentType, ts)
+				_, _, _, err := app.append(bcase.parsable, bcase.contentType, ts)
 				if err != nil {
 					b.Fatal(err)
 				}
@@ -1596,30 +1391,85 @@ func BenchmarkScrapeLoopAppend(b *testing.B) {
 	}
 }
 
+func TestScrapeLoopScrapeAndReport(t *testing.T) {
+	parsableText := readTextParseTestMetrics(t)
+	// On Windows \r is added when reading, but parsers do not support this. Kill it.
+	parsableText = bytes.ReplaceAll(parsableText, []byte("\r"), nil)
+
+	appTest := teststorage.NewAppendable()
+	sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.appendable = appTest
+		sl.fallbackScrapeProtocol = "application/openmetrics-text"
+	})
+	scraper.scrapeFunc = func(_ context.Context, writer io.Writer) error {
+		_, err := writer.Write(parsableText)
+		return err
+	}
+
+	ts := time.Time{}
+
+	sl.scrapeAndReport(time.Time{}, ts, nil)
+	require.NoError(t, scraper.lastError)
+
+	require.Len(t, appTest.ResultSamples(), 1862)
+	require.Len(t, appTest.ResultMetadata(), 1862)
+}
+
+// Recommended CLI invocation:
+/*
+	export bench=scrapeAndReport && go test ./scrape/... \
+		-run '^$' -bench '^BenchmarkScrapeLoopScrapeAndReport' \
+		-benchtime 5s -count 6 -cpu 2 -timeout 999m \
+		| tee ${bench}.txt
*/
+func BenchmarkScrapeLoopScrapeAndReport(b *testing.B) {
+	parsableText := readTextParseTestMetrics(b)
+
+	s := teststorage.New(b)
+	b.Cleanup(func() { _ = s.Close() })
+
+	sl, scraper := newTestScrapeLoop(b, func(sl *scrapeLoop) {
+		sl.appendable = s
+		sl.fallbackScrapeProtocol = "application/openmetrics-text"
+	})
+	scraper.scrapeFunc = func(_ context.Context, writer io.Writer) error {
+		_, err := writer.Write(parsableText)
+		return err
+	}
+
+	ts := time.Time{}
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	for b.Loop() {
+		ts = ts.Add(time.Second)
+		sl.scrapeAndReport(time.Time{}, ts, nil)
+		require.NoError(b, scraper.lastError)
+	}
+}
+
 func TestSetOptionsHandlingStaleness(t *testing.T) {
 	s := teststorage.New(t, 600000)
-	defer s.Close()
+	t.Cleanup(func() { _ = s.Close() })
 
 	signal := make(chan struct{}, 1)
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(t.Context())
 	defer cancel()
 
 	// Function to run the scrape loop
 	runScrapeLoop := func(ctx context.Context, t *testing.T, cue int, action func(*scrapeLoop)) {
-		var (
-			scraper = &testScraper{}
-			app     = func(ctx context.Context) storage.Appender {
-				return s.Appender(ctx)
-			}
-		)
-		sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond)
+		sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+			sl.ctx = ctx
+			sl.appendable = s
+		})
+
 		numScrapes := 0
 		scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
 			numScrapes++
 			if numScrapes == cue {
 				action(sl)
 			}
-			fmt.Fprintf(w, "metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes)
+			_, _ = fmt.Fprintf(w, "metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes)
 			return nil
 		}
 		sl.run(nil)
@@ -1644,25 +1494,25 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
 		t.Fatalf("Scrape wasn't stopped.")
 	}
 
-	ctx1, cancel := context.WithCancel(context.Background())
+	ctx1, cancel := context.WithCancel(t.Context())
 	defer cancel()
 
 	q, err := s.Querier(0, time.Now().UnixNano())
 	require.NoError(t, err)
-	defer q.Close()
+	t.Cleanup(func() { _ = q.Close() })
 
 	series := q.Select(ctx1, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_a"))
 
-	var results []floatSample
+	var results []sample
 	for series.Next() {
 		it := series.At().Iterator(nil)
 		for it.Next() == chunkenc.ValFloat {
 			t, v := it.At()
-			results = append(results, floatSample{
-				metric: series.At().Labels(),
-				t:      t,
-				f:      v,
+			results = append(results, sample{
+				L: series.At().Labels(),
+				T: t,
+				V: v,
 			})
 		}
 		require.NoError(t, it.Err())
@@ -1670,7 +1520,7 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
 	require.NoError(t, series.Err())
 	var c int
 	for _, s := range results {
-		if value.IsStaleNaN(s.f) {
+		if value.IsStaleNaN(s.V) {
 			c++
 		}
 	}
@@ -1678,25 +1528,25 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
 }
 
 func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
-	appender := &collectResultAppender{}
-	var (
-		signal  = make(chan struct{}, 1)
-		scraper = &testScraper{}
-		app     = func(context.Context) storage.Appender { return appender }
-	)
+	signal := make(chan struct{}, 1)
+
+	ctx, cancel := context.WithCancel(t.Context())
+	appTest := teststorage.NewAppendable()
+	sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.ctx = ctx
+		sl.appendable = appTest
+		// Since we're writing samples directly below we need to provide a protocol fallback.
+		sl.fallbackScrapeProtocol = "text/plain"
+	})
 
-	ctx, cancel := context.WithCancel(context.Background())
-	// Since we're writing samples directly below we need to provide a protocol fallback.
-	sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain")
 	// Succeed once, several failures, then stop.
 	numScrapes := 0
-
 	scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
 		numScrapes++
 		switch numScrapes {
 		case 1:
-			w.Write([]byte("metric_a 42\n"))
+			_, _ = w.Write([]byte("metric_a 42\n"))
 			return nil
 		case 5:
 			cancel()
@@ -1715,36 +1565,39 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
 		require.FailNow(t, "Scrape wasn't stopped.")
 	}
 
-	// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
-	// each scrape successful or not.
-	require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender)
-	require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected")
-	require.True(t, value.IsStaleNaN(appender.resultFloats[6].f),
-		"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f))
+	got := appTest.ResultSamples()
+	// 1 successfully scraped sample
+	// 1 stale marker after first fail
+	// 5x 5 report samples for each scrape, successful or not.
+	require.Len(t, got, 27, "Appended samples not as expected:\n%s", appTest)
+	require.Equal(t, 42.0, got[0].V, "Appended first sample not as expected")
+	require.True(t, value.IsStaleNaN(got[6].V),
+		"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(got[6].V))
 }
 
 func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
-	appender := &collectResultAppender{}
-	var (
-		signal     = make(chan struct{}, 1)
-		scraper    = &testScraper{}
-		app        = func(context.Context) storage.Appender { return appender }
-		numScrapes = 0
-	)
+	signal := make(chan struct{}, 1)
 
-	ctx, cancel := context.WithCancel(context.Background())
-	// Since we're writing samples directly below we need to provide a protocol fallback.
-	sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain")
+	ctx, cancel := context.WithCancel(t.Context())
+	appTest := teststorage.NewAppendable()
+	sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.ctx = ctx
+		sl.appendable = appTest
+		// Since we're writing samples directly below we need to provide a protocol fallback.
+		sl.fallbackScrapeProtocol = "text/plain"
+	})
 
 	// Succeed once, several failures, then stop.
+	numScrapes := 0
 	scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
 		numScrapes++
+
 		switch numScrapes {
 		case 1:
-			w.Write([]byte("metric_a 42\n"))
+			_, _ = w.Write([]byte("metric_a 42\n"))
 			return nil
 		case 2:
-			w.Write([]byte("7&-\n"))
+			_, _ = w.Write([]byte("7&-\n"))
 			return nil
 		case 3:
 			cancel()
@@ -1759,46 +1612,49 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
 
 	select {
 	case <-signal:
+		// TODO(bwplotka): Prone to flakiness, depend on atomic numScrapes.
 	case <-time.After(5 * time.Second):
 		require.FailNow(t, "Scrape wasn't stopped.")
 	}
 
-	// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
-	// each scrape successful or not.
-	require.Len(t, appender.resultFloats, 17, "Appended samples not as expected:\n%s", appender)
-	require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected")
-	require.True(t, value.IsStaleNaN(appender.resultFloats[6].f),
-		"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f))
+	got := appTest.ResultSamples()
+	// 1 successfully scraped sample
+	// 1 stale marker after first fail
+	// 3x 5 report samples for each scrape, successful or not.
+	require.Len(t, got, 17, "Appended samples not as expected:\n%s", appTest)
+	require.Equal(t, 42.0, got[0].V, "Appended first sample not as expected")
+	require.True(t, value.IsStaleNaN(got[6].V),
+		"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(got[6].V))
 }
 
-// If we have a target with sample_limit set and scrape initially works but then we hit the sample_limit error,
+// If we have a target with sample_limit set and scrape initially works, but then we hit the sample_limit error,
 // then we don't expect to see any StaleNaNs appended for the series that disappeared due to sample_limit error.
 func TestScrapeLoopRunCreatesStaleMarkersOnSampleLimit(t *testing.T) {
-	appender := &collectResultAppender{}
-	var (
-		signal     = make(chan struct{}, 1)
-		scraper    = &testScraper{}
-		app        = func(_ context.Context) storage.Appender { return appender }
-		numScrapes = 0
-	)
+	signal := make(chan struct{}, 1)
 
-	ctx, cancel := context.WithCancel(context.Background())
-	// Since we're writing samples directly below we need to provide a protocol fallback.
-	sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain")
-	sl.sampleLimit = 4
+	ctx, cancel := context.WithCancel(t.Context())
+	appTest := teststorage.NewAppendable()
+	sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.ctx = ctx
+		sl.appendable = appTest
+		// Since we're writing samples directly below we need to provide a protocol fallback.
+		sl.fallbackScrapeProtocol = "text/plain"
+		sl.sampleLimit = 4
+	})
 
 	// Succeed once, several failures, then stop.
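+	// Scrapes #1 and #3 fit under sample_limit, #2 exceeds it, #4 cancels the loop; see the switch below.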
+	numScrapes := 0
 	scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
 		numScrapes++
 		switch numScrapes {
 		case 1:
-			w.Write([]byte("metric_a 10\nmetric_b 10\nmetric_c 10\nmetric_d 10\n"))
+			_, _ = w.Write([]byte("metric_a 10\nmetric_b 10\nmetric_c 10\nmetric_d 10\n"))
 			return nil
 		case 2:
-			w.Write([]byte("metric_a 20\nmetric_b 20\nmetric_c 20\nmetric_d 20\nmetric_e 999\n"))
+			_, _ = w.Write([]byte("metric_a 20\nmetric_b 20\nmetric_c 20\nmetric_d 20\nmetric_e 999\n"))
 			return nil
 		case 3:
-			w.Write([]byte("metric_a 30\nmetric_b 30\nmetric_c 30\nmetric_d 30\n"))
+			_, _ = w.Write([]byte("metric_a 30\nmetric_b 30\nmetric_c 30\nmetric_d 30\n"))
 			return nil
 		case 4:
 			cancel()
@@ -1817,49 +1673,52 @@ func TestScrapeLoopRunCreatesStaleMarkersOnSampleLimit(t *testing.T) {
 		require.FailNow(t, "Scrape wasn't stopped.")
 	}
 
+	got := appTest.ResultSamples()
+
 	// 4 scrapes in total:
 	// #1 - success - 4 samples appended + 5 report series
 	// #2 - sample_limit exceeded - no samples appended, only 5 report series
 	// #3 - success - 4 samples appended + 5 report series
 	// #4 - scrape canceled - 4 StaleNaNs appended because of scrape error + 5 report series
-	require.Len(t, appender.resultFloats, (4+5)+5+(4+5)+(4+5), "Appended samples not as expected:\n%s", appender)
+	require.Len(t, got, (4+5)+5+(4+5)+(4+5), "Appended samples not as expected:\n%s", appTest)
 
 	// Expect first 4 samples to be metric_X [0-3].
 	for i := range 4 {
-		require.Equal(t, 10.0, appender.resultFloats[i].f, "Appended %d sample not as expected", i)
+		require.Equal(t, 10.0, got[i].V, "Appended %d sample not as expected", i)
 	}
 	// Next 5 samples are report series [4-8].
 	// Next 5 samples are report series for the second scrape [9-13].
 	// Expect first 4 samples to be metric_X from the third scrape [14-17].
 	for i := 14; i <= 17; i++ {
-		require.Equal(t, 30.0, appender.resultFloats[i].f, "Appended %d sample not as expected", i)
+		require.Equal(t, 30.0, got[i].V, "Appended %d sample not as expected", i)
 	}
 	// Next 5 samples are report series [18-22].
 	// Next 5 samples are report series [23-26].
 	for i := 23; i <= 26; i++ {
-		require.True(t, value.IsStaleNaN(appender.resultFloats[i].f),
-			"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[i].f))
+		require.True(t, value.IsStaleNaN(got[i].V),
+			"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(got[i].V))
 	}
 }
 
 func TestScrapeLoopCache(t *testing.T) {
 	s := teststorage.New(t)
-	defer s.Close()
+	t.Cleanup(func() { _ = s.Close() })
 
-	appender := &collectResultAppender{}
-	var (
-		signal  = make(chan struct{}, 1)
-		scraper = &testScraper{}
-		app     = func(ctx context.Context) storage.Appender { appender.next = s.Appender(ctx); return appender }
-	)
+	signal := make(chan struct{}, 1)
 
-	ctx, cancel := context.WithCancel(context.Background())
-	// Decreasing the scrape interval could make the test fail, as multiple scrapes might be initiated at identical millisecond timestamps.
-	// See https://github.com/prometheus/prometheus/issues/12727.
-	// Since we're writing samples directly below we need to provide a protocol fallback.
-	sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 100*time.Millisecond, "text/plain")
+	ctx, cancel := context.WithCancel(t.Context())
+	appTest := teststorage.NewAppendable().Then(s)
+	sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.ctx = ctx
+		sl.l = promslog.New(&promslog.Config{})
+		sl.appendable = appTest
+		// Since we're writing samples directly below we need to provide a protocol fallback.
+		sl.fallbackScrapeProtocol = "text/plain"
+		// Decreasing the scrape interval could make the test fail, as multiple scrapes might be initiated at identical millisecond timestamps.
+		// See https://github.com/prometheus/prometheus/issues/12727.
+		sl.interval = 100 * time.Millisecond
+	})
 
 	numScrapes := 0
-
 	scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
 		switch numScrapes {
 		case 1, 2:
@@ -1877,10 +1736,10 @@ func TestScrapeLoopCache(t *testing.T) {
 		numScrapes++
 		switch numScrapes {
 		case 1:
-			w.Write([]byte("metric_a 42\nmetric_b 43\n"))
+			_, _ = w.Write([]byte("metric_a 42\nmetric_b 43\n"))
 			return nil
 		case 3:
-			w.Write([]byte("metric_a 44\n"))
+			_, _ = w.Write([]byte("metric_a 44\n"))
 			return nil
 		case 4:
 			cancel()
@@ -1899,29 +1758,23 @@ func TestScrapeLoopCache(t *testing.T) {
 		require.FailNow(t, "Scrape wasn't stopped.")
 	}
 
-	// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
-	// each scrape successful or not.
-	require.Len(t, appender.resultFloats, 26, "Appended samples not as expected:\n%s", appender)
+	// 3 successfully scraped samples
+	// 3 stale markers after samples were missing.
+	// 4x 5 report samples for each scrape, successful or not.
+	require.Len(t, appTest.ResultSamples(), 26, "Appended samples not as expected:\n%s", appTest)
 }
 
 func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
 	s := teststorage.New(t)
-	defer s.Close()
-
-	sapp := s.Appender(context.Background())
+	t.Cleanup(func() { _ = s.Close() })
 
-	appender := &collectResultAppender{next: sapp}
-	var (
-		signal  = make(chan struct{}, 1)
-		scraper = &testScraper{}
-		app     = func(context.Context) storage.Appender { return appender }
-	)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond)
+	signal := make(chan struct{}, 1)
+	ctx, cancel := context.WithCancel(t.Context())
+	sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.ctx = ctx
+	})
 
 	numScrapes := 0
-
 	scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
 		numScrapes++
 		if numScrapes < 5 {
 			s := ""
 			for i := range 500 {
 				s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes)
 			}
-			w.Write([]byte(s + "&"))
+			_, _ = w.Write([]byte(s + "&"))
 		} else {
 			cancel()
 		}
@@ -2004,37 +1857,38 @@ func TestScrapeLoopAppend(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		app := &collectResultAppender{}
-
 		discoveryLabels := &Target{
 			labels: labels.FromStrings(test.discoveryLabels...),
 		}
 
-		sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
-		sl.sampleMutator = func(l labels.Labels) labels.Labels {
-			return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil)
-		}
-		sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
-			return mutateReportSampleLabels(l, discoveryLabels)
-		}
+		appTest := teststorage.NewAppendable()
+		sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+			sl.appendable = appTest
+			sl.sampleMutator = func(l labels.Labels) labels.Labels {
+				return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil)
+			}
+			sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
+				return mutateReportSampleLabels(l, discoveryLabels)
+			}
+		})
 
 		now := time.Now()
-		slApp := sl.appender(context.Background())
-		_, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "text/plain", now)
+		app := sl.appender()
+		_, _, _, err := app.append([]byte(test.scrapeLabels), "text/plain", now)
 		require.NoError(t, err)
-		require.NoError(t, slApp.Commit())
+		require.NoError(t, app.Commit())
 
-		expected := []floatSample{
+		expected := []sample{
 			{
-				metric: test.expLset,
-				t:      timestamp.FromTime(now),
-				f:      test.expValue,
+				L: test.expLset,
+				T: timestamp.FromTime(now),
+				V: test.expValue,
 			},
 		}
 
 		t.Logf("Test:%s", test.title)
-		requireEqual(t, expected, app.resultFloats)
+		requireEqual(t, expected, appTest.ResultSamples())
 	}
 }
 
@@ -2042,13 +1896,12 @@ func requireEqual(t *testing.T, expected, actual any, msgAndArgs ...any) {
 	t.Helper()
 
 	testutil.RequireEqualWithOptions(t, expected, actual, []cmp.Option{
-		cmp.Comparer(equalFloatSamples),
-		cmp.AllowUnexported(histogramSample{}),
+		cmp.Comparer(func(a, b sample) bool { return a.Equals(b) }),
 		// StaleNaN samples are generated by iterating over a map, which means that the order
 		// of samples might be different on every test run. Sort series by label to avoid
 		// test failures because of that.
-		cmpopts.SortSlices(func(a, b floatSample) int {
-			return labels.Compare(a.metric, b.metric)
+		cmpopts.SortSlices(func(a, b sample) int {
+			return labels.Compare(a.L, b.L)
 		}),
 	},
 		msgAndArgs...)
@@ -2106,32 +1959,34 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
 	for name, tc := range testcases {
 		t.Run(name, func(t *testing.T) {
-			app := &collectResultAppender{}
-			sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
-			sl.sampleMutator = func(l labels.Labels) labels.Labels {
-				return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
-			}
-			slApp := sl.appender(context.Background())
-			_, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "text/plain", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
+			appTest := teststorage.NewAppendable()
+			sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+				sl.appendable = appTest
+				sl.sampleMutator = func(l labels.Labels) labels.Labels {
+					return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
+				}
+			})
+
+			app := sl.appender()
+			_, _, _, err := app.append([]byte(tc.exposedLabels), "text/plain", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
 			require.NoError(t, err)
 
-			require.NoError(t, slApp.Commit())
+			require.NoError(t, app.Commit())
 
-			requireEqual(t, []floatSample{
+			requireEqual(t, []sample{
 				{
-					metric: labels.FromStrings(tc.expected...),
-					t:      timestamp.FromTime(time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)),
-					f:      0,
+					L: labels.FromStrings(tc.expected...),
+					T: timestamp.FromTime(time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)),
+					V: 0,
 				},
-			}, app.resultFloats)
+			}, appTest.ResultSamples())
 		})
 	}
 }
 
 func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
-	// collectResultAppender's AddFast always returns ErrNotFound if we don't give it a next.
-	app := &collectResultAppender{}
-	sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
+	appTest := teststorage.NewAppendable()
+	sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
 
 	fakeRef := storage.SeriesRef(1)
 	expValue := float64(1)
@@ -2141,7 +1996,8 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
 	require.NoError(t, warning)
 
 	var lset labels.Labels
-	p.Next()
+	_, err := p.Next()
+	require.NoError(t, err)
 	p.Labels(&lset)
 	hash := lset.Hash()
 
@@ -2149,36 +2005,43 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
 	sl.cache.addRef(metric, fakeRef, lset, hash)
 
 	now := time.Now()
-	slApp := sl.appender(context.Background())
-	_, _, _, err := sl.append(slApp, metric, "text/plain", now)
+	app := sl.appender()
+	_, _, _, err = app.append(metric, "text/plain", now)
 	require.NoError(t, err)
-	require.NoError(t, slApp.Commit())
+	require.NoError(t, app.Commit())
 
-	expected := []floatSample{
+	expected := []sample{
 		{
-			metric: lset,
-			t:      timestamp.FromTime(now),
-			f:      expValue,
+			L: lset,
+			T: timestamp.FromTime(now),
+			V: expValue,
 		},
 	}
 
-	require.Equal(t, expected, app.resultFloats)
+	require.Equal(t, expected, appTest.ResultSamples())
 }
 
-func TestScrapeLoopAppendSampleLimit(t *testing.T) {
-	resApp := &collectResultAppender{}
-	app := &limitAppender{Appender: resApp, limit: 1}
+type appendableFunc func(ctx context.Context) storage.Appender
 
-	sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
-	sl.sampleMutator = func(l labels.Labels) labels.Labels {
-		if l.Has("deleteme") {
-			return labels.EmptyLabels()
+func (a appendableFunc) Appender(ctx context.Context) storage.Appender { return a(ctx) }
+
+func TestScrapeLoopAppendSampleLimit(t *testing.T) {
+	appTest := teststorage.NewAppendable()
+	sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.appendable = appendableFunc(func(ctx context.Context) storage.Appender {
+			// Chain appTest to verify what samples passed through.
+			return &limitAppender{Appender: appTest.Appender(ctx), limit: 1}
+		})
+		sl.sampleMutator = func(l labels.Labels) labels.Labels {
+			if l.Has("deleteme") {
+				return labels.EmptyLabels()
+			}
+			return l
 		}
-		return l
-	}
-	sl.sampleLimit = app.limit
+		sl.sampleLimit = 1 // Same as limitAppender.limit
+	})
 
-	// Get the value of the Counter before performing the append.
+	// Get the value of the Counter before performing append.
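+	// targetScrapeSampleLimit is a counter; reading it before and after the append lets us assert exactly one increment.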
 	beforeMetric := dto.Metric{}
 	err := sl.metrics.targetScrapeSampleLimit.Write(&beforeMetric)
 	require.NoError(t, err)
 
@@ -2186,10 +2049,10 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
 	beforeMetricValue := beforeMetric.GetCounter().GetValue()
 
 	now := time.Now()
-	slApp := sl.appender(context.Background())
-	total, added, seriesAdded, err := sl.append(app, []byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "text/plain", now)
+	app := sl.appender()
+	total, added, seriesAdded, err := app.append([]byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "text/plain", now)
 	require.ErrorIs(t, err, errSampleLimit)
-	require.NoError(t, slApp.Rollback())
+	require.NoError(t, app.Rollback())
 	require.Equal(t, 3, total)
 	require.Equal(t, 3, added)
 	require.Equal(t, 1, seriesAdded)
@@ -2200,42 +2063,44 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
 	err = sl.metrics.targetScrapeSampleLimit.Write(&metric)
 	require.NoError(t, err)
 
-	value := metric.GetCounter().GetValue()
-	change := value - beforeMetricValue
+	v := metric.GetCounter().GetValue()
+	change := v - beforeMetricValue
 	require.Equal(t, 1.0, change, "Unexpected change of sample limit metric: %f", change)
 
 	// And verify that we got the samples that fit under the limit.
-	want := []floatSample{
+	want := []sample{
 		{
-			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
-			t:      timestamp.FromTime(now),
-			f:      1,
+			L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+			T: timestamp.FromTime(now),
+			V: 1,
 		},
 	}
-	requireEqual(t, want, resApp.rolledbackFloats, "Appended samples not as expected:\n%s", appender)
+	requireEqual(t, want, appTest.RolledbackSamples(), "Appended samples not as expected:\n%s", appTest)
 
 	now = time.Now()
-	slApp = sl.appender(context.Background())
-	total, added, seriesAdded, err = sl.append(slApp, []byte("metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n"), "text/plain", now)
+	app = sl.appender()
+	total, added, seriesAdded, err = app.append([]byte("metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n"), "text/plain", now)
 	require.ErrorIs(t, err, errSampleLimit)
-	require.NoError(t, slApp.Rollback())
+	require.NoError(t, app.Rollback())
 	require.Equal(t, 9, total)
 	require.Equal(t, 6, added)
-	require.Equal(t, 0, seriesAdded)
+	require.Equal(t, 1, seriesAdded)
 }
 
 func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
-	resApp := &collectResultAppender{}
-	app := &bucketLimitAppender{Appender: resApp, limit: 2}
-
-	sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
-	sl.enableNativeHistogramScraping = true
-	sl.sampleMutator = func(l labels.Labels) labels.Labels {
-		if l.Has("deleteme") {
-			return labels.EmptyLabels()
+	sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.appendable = appendableFunc(func(ctx context.Context) storage.Appender {
+			return &bucketLimitAppender{Appender: teststorage.NewAppendable().Appender(ctx), limit: 2}
+		})
+		sl.enableNativeHistogramScraping = true
+		sl.sampleMutator = func(l labels.Labels) labels.Labels {
+			if l.Has("deleteme") {
+				return labels.EmptyLabels()
+			}
+			return l
 		}
-		return l
-	}
+	})
+	app := sl.appender()
 
 	metric := dto.Metric{}
	err := sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric)
@@ -2254,7 +2119,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
 		[]string{"size"},
 	)
 	registry := prometheus.NewRegistry()
-	registry.Register(nativeHistogram)
+	require.NoError(t, registry.Register(nativeHistogram))
 	nativeHistogram.WithLabelValues("S").Observe(1.0)
 	nativeHistogram.WithLabelValues("M").Observe(1.0)
 	nativeHistogram.WithLabelValues("L").Observe(1.0)
@@ -2270,7 +2135,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
 	require.NoError(t, err)
 
 	now := time.Now()
-	total, added, seriesAdded, err := sl.append(app, msg, "application/vnd.google.protobuf", now)
+	total, added, seriesAdded, err := app.append(msg, "application/vnd.google.protobuf", now)
 	require.NoError(t, err)
 	require.Equal(t, 3, total)
 	require.Equal(t, 3, added)
@@ -2293,11 +2158,11 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
 	require.NoError(t, err)
 
 	now = time.Now()
-	total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now)
+	total, added, seriesAdded, err = app.append(msg, "application/vnd.google.protobuf", now)
 	require.NoError(t, err)
 	require.Equal(t, 3, total)
 	require.Equal(t, 3, added)
-	require.Equal(t, 3, seriesAdded)
+	require.Equal(t, 0, seriesAdded) // Series are cached.
 
 	err = sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric)
 	require.NoError(t, err)
@@ -2316,14 +2181,14 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
 	require.NoError(t, err)
 
 	now = time.Now()
-	total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now)
+	total, added, seriesAdded, err = app.append(msg, "application/vnd.google.protobuf", now)
 	if !errors.Is(err, errBucketLimit) {
 		t.Fatalf("Did not see expected histogram bucket limit error: %s", err)
 	}
 	require.NoError(t, app.Rollback())
 	require.Equal(t, 3, total)
 	require.Equal(t, 3, added)
-	require.Equal(t, 0, seriesAdded)
+	require.Equal(t, 0, seriesAdded) // Series are cached.
 
 	err = sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric)
 	require.NoError(t, err)
@@ -2333,151 +2198,149 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
 
 func TestScrapeLoop_ChangingMetricString(t *testing.T) {
 	// This is a regression test for the scrape loop cache not properly maintaining
-	// IDs when the string representation of a metric changes across a scrape. Thus
+	// IDs when the string representation of a metric changes across a scrape. Thus,
 	// we use a real storage appender here.
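+	// (With this refactor, teststorage.NewAppendable stands in for real storage; it is assumed to preserve ref semantics across appends.)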
-	s := teststorage.New(t)
-	defer s.Close()
-
-	capp := &collectResultAppender{}
-	sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return capp }, 0)
+	appTest := teststorage.NewAppendable()
+	sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
 
 	now := time.Now()
-	slApp := sl.appender(context.Background())
-	_, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "text/plain", now)
+	app := sl.appender()
+	_, _, _, err := app.append([]byte(`metric_a{a="1",b="1"} 1`), "text/plain", now)
 	require.NoError(t, err)
-	require.NoError(t, slApp.Commit())
+	require.NoError(t, app.Commit())
 
-	slApp = sl.appender(context.Background())
-	_, _, _, err = sl.append(slApp, []byte(`metric_a{b="1",a="1"} 2`), "text/plain", now.Add(time.Minute))
+	app = sl.appender()
+	_, _, _, err = app.append([]byte(`metric_a{b="1",a="1"} 2`), "text/plain", now.Add(time.Minute))
 	require.NoError(t, err)
-	require.NoError(t, slApp.Commit())
+	require.NoError(t, app.Commit())
 
-	want := []floatSample{
+	want := []sample{
 		{
-			metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
-			t:      timestamp.FromTime(now),
-			f:      1,
+			L: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
+			T: timestamp.FromTime(now),
+			V: 1,
 		},
 		{
-			metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
-			t:      timestamp.FromTime(now.Add(time.Minute)),
-			f:      2,
+			L: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
+			T: timestamp.FromTime(now.Add(time.Minute)),
+			V: 2,
 		},
 	}
-	require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender)
+	require.Equal(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
 }
 
 func TestScrapeLoopAppendFailsWithNoContentType(t *testing.T) {
-	app := &collectResultAppender{}
-
-	// Explicitly setting the lack of fallback protocol here to make it obvious.
-	sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0, "")
+	sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		// Explicitly setting the lack of fallback protocol here to make it obvious.
+		sl.fallbackScrapeProtocol = ""
+	})
 
 	now := time.Now()
-	slApp := sl.appender(context.Background())
-	_, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "", now)
-	// We expect the appropriate error.
+	app := sl.appender()
+	_, _, _, err := app.append([]byte("metric_a 1\n"), "", now)
+	// We expect the appropriate error.
 	require.ErrorContains(t, err, "non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target", "Expected \"non-compliant scrape\" error but got: %s", err)
 }
 
+// TestScrapeLoopAppendEmptyWithNoContentType ensures there are no errors when we get a blank scrape or just want to append a stale marker.
 func TestScrapeLoopAppendEmptyWithNoContentType(t *testing.T) {
-	// This test ensures we there are no errors when we get a blank scrape or just want to append a stale marker.
-	app := &collectResultAppender{}
-
-	// Explicitly setting the lack of fallback protocol here to make it obvious.
-	sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0, "")
+	sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		// Explicitly setting the lack of fallback protocol here to make it obvious.
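+		// Unlike the non-empty scrape in the previous test, an empty body must not trigger the fallback error path.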
+		sl.fallbackScrapeProtocol = ""
+	})
 
 	now := time.Now()
-	slApp := sl.appender(context.Background())
-	_, _, _, err := sl.append(slApp, []byte(""), "", now)
+	app := sl.appender()
+	_, _, _, err := app.append([]byte(""), "", now)
 	require.NoError(t, err)
-	require.NoError(t, slApp.Commit())
+	require.NoError(t, app.Commit())
 }
 
 func TestScrapeLoopAppendStaleness(t *testing.T) {
-	app := &collectResultAppender{}
-
-	sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
+	appTest := teststorage.NewAppendable()
+	sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
 
 	now := time.Now()
-	slApp := sl.appender(context.Background())
-	_, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "text/plain", now)
+	app := sl.appender()
+	_, _, _, err := app.append([]byte("metric_a 1\n"), "text/plain", now)
 	require.NoError(t, err)
-	require.NoError(t, slApp.Commit())
+	require.NoError(t, app.Commit())
 
-	slApp = sl.appender(context.Background())
-	_, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second))
+	app = sl.appender()
+	_, _, _, err = app.append([]byte(""), "", now.Add(time.Second))
 	require.NoError(t, err)
-	require.NoError(t, slApp.Commit())
+	require.NoError(t, app.Commit())
 
-	want := []floatSample{
+	want := []sample{
 		{
-			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
-			t:      timestamp.FromTime(now),
-			f:      1,
+			L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+			T: timestamp.FromTime(now),
+			V: 1,
 		},
 		{
-			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
-			t:      timestamp.FromTime(now.Add(time.Second)),
-			f:      math.Float64frombits(value.StaleNaN),
+			L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+			T: timestamp.FromTime(now.Add(time.Second)),
+			V: math.Float64frombits(value.StaleNaN),
 		},
 	}
-	requireEqual(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender)
+	requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
 }
 
 func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
-	app := &collectResultAppender{}
-	sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
+	appTest := teststorage.NewAppendable()
+	sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
 
 	now := time.Now()
-	slApp := sl.appender(context.Background())
-	_, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "text/plain", now)
+	app := sl.appender()
+	_, _, _, err := app.append([]byte("metric_a 1 1000\n"), "text/plain", now)
 	require.NoError(t, err)
-	require.NoError(t, slApp.Commit())
+	require.NoError(t, app.Commit())
 
-	slApp = sl.appender(context.Background())
-	_, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second))
+	app = sl.appender()
+	_, _, _, err = app.append([]byte(""), "", now.Add(time.Second))
 	require.NoError(t, err)
-	require.NoError(t, slApp.Commit())
+	require.NoError(t, app.Commit())
 
-	want := []floatSample{
+	want := []sample{
 		{
-			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
-			t:      1000,
-			f:      1,
+			L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+			T: 1000,
+			V: 1,
 		},
 	}
-	require.Equal(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender)
+	require.Equal(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
 }
 
 func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) {
-	app := &collectResultAppender{}
-	sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
-	sl.trackTimestampsStaleness = true
+	appTest := teststorage.NewAppendable()
+	sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.appendable = appTest
+		sl.trackTimestampsStaleness = true
+	})
 
 	now := time.Now()
-	slApp := sl.appender(context.Background())
-	_, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "text/plain", now)
+	app := sl.appender()
+	_, _, _, err := app.append([]byte("metric_a 1 1000\n"), "text/plain", now)
 	require.NoError(t, err)
-	require.NoError(t, slApp.Commit())
+	require.NoError(t, app.Commit())
 
-	slApp = sl.appender(context.Background())
-	_, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second))
+	app = sl.appender()
+	_, _, _, err = app.append([]byte(""), "", now.Add(time.Second))
 	require.NoError(t, err)
-	require.NoError(t, slApp.Commit())
+	require.NoError(t, app.Commit())
 
-	want := []floatSample{
+	want := []sample{
 		{
-			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
-			t:      1000,
-			f:      1,
+			L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+			T: 1000,
+			V: 1,
 		},
 		{
-			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
-			t:      timestamp.FromTime(now.Add(time.Second)),
-			f:      math.Float64frombits(value.StaleNaN),
+			L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+			T: timestamp.FromTime(now.Add(time.Second)),
+			V: math.Float64frombits(value.StaleNaN),
 		},
 	}
-	requireEqual(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender)
+	requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
 }
 
 func TestScrapeLoopAppendExemplar(t *testing.T) {
@@ -2488,18 +2351,16 @@ func TestScrapeLoopAppendExemplar(t *testing.T) {
 		scrapeText      string
 		contentType     string
 		discoveryLabels []string
-		floats          []floatSample
-		histograms      []histogramSample
-		exemplars       []exemplar.Exemplar
+		samples         []sample
 	}{
 		{
 			title:           "Metric without exemplars",
 			scrapeText:      "metric_total{n=\"1\"} 0\n# EOF",
 			contentType:     "application/openmetrics-text",
 			discoveryLabels: []string{"n", "2"},
-			floats: []floatSample{{
-				metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
-				f:      0,
+			samples: []sample{{
+				L: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
+				V: 0,
 			}},
 		},
 		{
 			title:           "Metric with exemplars",
 			scrapeText:      "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0\n# EOF",
 			contentType:     "application/openmetrics-text",
 			discoveryLabels: []string{"n", "2"},
-			floats: []floatSample{{
-				metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
-				f:      0,
+			samples: []sample{{
+				L: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
+				V: 0,
+				ES: []exemplar.Exemplar{
+					{Labels: labels.FromStrings("a", "abc"), Value: 1},
+				},
 			}},
-			exemplars: []exemplar.Exemplar{
-				{Labels: labels.FromStrings("a", "abc"), Value: 1},
-			},
 		},
 		{
 			title:           "Metric with exemplars and TS",
 			scrapeText:      "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF",
 			contentType:     "application/openmetrics-text",
 			discoveryLabels: []string{"n", "2"},
-			floats: []floatSample{{
-				metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
-				f:      0,
+			samples: []sample{{
+				L: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
+				V: 0,
+				ES: []exemplar.Exemplar{{Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true}},
 			}},
-			exemplars: []exemplar.Exemplar{
-				{Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true},
-			},
 		},
 		{
 			title: "Two metrics and exemplars",
@@ -2534,17 +2393,15 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
 # EOF`,
 			contentType: "application/openmetrics-text",
-			floats: []floatSample{{
-				metric: labels.FromStrings("__name__", "metric_total", "n", "1"),
-				f:      1,
+			samples: []sample{{
+				L: labels.FromStrings("__name__", "metric_total", "n", "1"),
+				V: 1,
+				ES: []exemplar.Exemplar{{Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true}},
 			}, {
-				metric: labels.FromStrings("__name__", "metric_total", "n", "2"),
-				f:      2,
+				L: labels.FromStrings("__name__", "metric_total", "n", "2"),
+				V: 2,
+				ES: []exemplar.Exemplar{{Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true}},
 			}},
-			exemplars: []exemplar.Exemplar{
-				{Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true},
-				{Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true},
-			},
 		},
 		{
 			title: "Native histogram with three exemplars from classic buckets",
@@ -2636,10 +2493,10 @@ metric: <
 `,
 			contentType: "application/vnd.google.protobuf",
-			histograms: []histogramSample{{
-				t:      1234568,
-				metric: labels.FromStrings("__name__", "test_histogram"),
-				h: &histogram.Histogram{
+			samples: []sample{{
+				T: 1234568,
+				L: labels.FromStrings("__name__", "test_histogram"),
+				H: &histogram.Histogram{
 					Count:         175,
 					ZeroCount:     2,
 					Sum:           0.0008280461746287094,
@@ -2656,12 +2513,12 @@ metric: <
 					PositiveBuckets: []int64{1, 2, -1, -1},
 					NegativeBuckets: []int64{1, 3, -2, -1, 1},
 				},
+				ES: []exemplar.Exemplar{
+					// Native histogram exemplars are arranged by timestamp, and those with missing timestamps are dropped.
+					{Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
+					{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
+				},
 			}},
-			exemplars: []exemplar.Exemplar{
-				// Native histogram exemplars are arranged by timestamp, and those with missing timestamps are dropped.
-				{Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
-				{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
-			},
 		},
 		{
 			title: "Native histogram with three exemplars scraped as classic histogram",
@@ -2754,46 +2611,50 @@ metric: <
 `,
 			alwaysScrapeClassicHist: true,
 			contentType:             "application/vnd.google.protobuf",
-			floats: []floatSample{
-				{metric: labels.FromStrings("__name__", "test_histogram_count"), t: 1234568, f: 175},
-				{metric: labels.FromStrings("__name__", "test_histogram_sum"), t: 1234568, f: 0.0008280461746287094},
-				{metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), t: 1234568, f: 2},
-				{metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), t: 1234568, f: 4},
-				{metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), t: 1234568, f: 16},
-				{metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0001899999999999998"), t: 1234568, f: 32},
-				{metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175},
-			},
-			histograms: []histogramSample{{
-				t:      1234568,
-				metric: labels.FromStrings("__name__", "test_histogram"),
-				h: &histogram.Histogram{
-					Count:         175,
-					ZeroCount:     2,
-					Sum:           0.0008280461746287094,
-					ZeroThreshold: 2.938735877055719e-39,
-					Schema:        3,
-					PositiveSpans: []histogram.Span{
-						{Offset: -161, Length: 1},
-						{Offset: 8, Length: 3},
+			samples: []sample{
+				{L: labels.FromStrings("__name__", "test_histogram_count"), T: 1234568, V: 175},
+				{L: labels.FromStrings("__name__", "test_histogram_sum"), T: 1234568, V: 0.0008280461746287094},
+				{L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), T: 1234568, V: 2},
+				{
+					L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), T: 1234568, V: 4,
+					ES: []exemplar.Exemplar{{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}},
+				},
+				{
+					L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), T: 1234568, V: 16,
+					ES: []exemplar.Exemplar{{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}},
+				},
+				{
+					L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0001899999999999998"), T: 1234568, V: 32,
+					ES: []exemplar.Exemplar{{Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true}},
+				},
+				{L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), T: 1234568, V: 175},
+				{
+					T: 1234568,
+					L: labels.FromStrings("__name__", "test_histogram"),
+					H: &histogram.Histogram{
+						Count:         175,
+						ZeroCount:     2,
+						Sum:           0.0008280461746287094,
+						ZeroThreshold: 2.938735877055719e-39,
+						Schema:        3,
+						PositiveSpans: []histogram.Span{
+							{Offset: -161, Length: 1},
+							{Offset: 8, Length: 3},
+						},
+						NegativeSpans: []histogram.Span{
+							{Offset: -162, Length: 1},
+							{Offset: 23, Length: 4},
+						},
+						PositiveBuckets: []int64{1, 2, -1, -1},
+						NegativeBuckets: []int64{1, 3, -2, -1, 1},
 					},
-					NegativeSpans: []histogram.Span{
-						{Offset: -162, Length: 1},
-						{Offset: 23, Length: 4},
+					ES: []exemplar.Exemplar{
+						// Native histogram one is arranged by timestamp.
+						// Exemplars with missing timestamps are dropped for native histograms.
+						{Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
+						{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
 					},
-					PositiveBuckets: []int64{1, 2, -1, -1},
-					NegativeBuckets: []int64{1, 3, -2, -1, 1},
 				},
-			}},
-			exemplars: []exemplar.Exemplar{
-				// Native histogram one is arranged by timestamp.
-				// Exemplars with missing timestamps are dropped for native histograms.
-				{Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
-				{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
-				// Classic histogram one is in order of appearance.
-				// Exemplars with missing timestamps are supported for classic histograms.
-				{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
-				{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false},
-				{Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
-			},
 		},
 		{
@@ -2869,10 +2730,10 @@ metric: <
 >
 `,
-			histograms: []histogramSample{{
-				t:      1234568,
-				metric: labels.FromStrings("__name__", "test_histogram"),
-				h: &histogram.Histogram{
+			samples: []sample{{
+				T: 1234568,
+				L: labels.FromStrings("__name__", "test_histogram"),
+				H: &histogram.Histogram{
 					Count:         175,
 					ZeroCount:     2,
 					Sum:           0.0008280461746287094,
@@ -2889,12 +2750,12 @@ metric: <
 					PositiveBuckets: []int64{1, 2, -1, -1},
 					NegativeBuckets: []int64{1, 3, -2, -1, 1},
 				},
+				ES: []exemplar.Exemplar{
+					// Exemplars with missing timestamps are dropped for native histograms.
+					{Labels: labels.FromStrings("dummyID", "58242"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
+					{Labels: labels.FromStrings("dummyID", "59732"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
+				},
 			}},
-			exemplars: []exemplar.Exemplar{
-				// Exemplars with missing timestamps are dropped for native histograms.
-				{Labels: labels.FromStrings("dummyID", "58242"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
-				{Labels: labels.FromStrings("dummyID", "59732"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
-			},
 		},
 		{
 			title: "Native histogram with exemplars but ingestion disabled",
@@ -2969,45 +2830,50 @@ metric: <
 >
 `,
-			floats: []floatSample{
-				{metric: labels.FromStrings("__name__", "test_histogram_count"), t: 1234568, f: 175},
-				{metric: labels.FromStrings("__name__", "test_histogram_sum"), t: 1234568, f: 0.0008280461746287094},
-				{metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175},
+			samples: []sample{
+				{L: labels.FromStrings("__name__", "test_histogram_count"), T: 1234568, V: 175},
+				{L: labels.FromStrings("__name__", "test_histogram_sum"), T: 1234568, V: 0.0008280461746287094},
+				{L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), T: 1234568, V: 175},
 			},
 		},
 	}
 
 	for _, test := range tests {
 		t.Run(test.title, func(t *testing.T) {
-			app := &collectResultAppender{}
-
 			discoveryLabels := &Target{
 				labels: labels.FromStrings(test.discoveryLabels...),
 			}
 
-			sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
-			sl.enableNativeHistogramScraping = test.enableNativeHistogramsIngestion
-			sl.sampleMutator = func(l labels.Labels) labels.Labels {
-				return mutateSampleLabels(l, discoveryLabels, false, nil)
-			}
-			sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
-				return mutateReportSampleLabels(l, discoveryLabels)
-			}
-			sl.alwaysScrapeClassicHist = test.alwaysScrapeClassicHist
+			appTest := teststorage.NewAppendable()
+			sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+				sl.appendable = appTest
+				sl.enableNativeHistogramScraping = test.enableNativeHistogramsIngestion
+				sl.sampleMutator = func(l labels.Labels) labels.Labels {
+					return mutateSampleLabels(l, discoveryLabels, false, nil)
+				}
+				sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
+					return mutateReportSampleLabels(l, discoveryLabels)
+				}
+				sl.alwaysScrapeClassicHist = test.alwaysScrapeClassicHist
+				// This test does not care about metadata. Having this true would mean we need to add metadata to sample
+				// expectations.
+				sl.appendMetadataToWAL = false
+			})
+			app := sl.appender()
 
 			now := time.Now()
-			for i := range test.floats {
-				if test.floats[i].t != 0 {
+			for i := range test.samples {
+				if test.samples[i].T != 0 {
 					continue
 				}
-				test.floats[i].t = timestamp.FromTime(now)
-			}
+				test.samples[i].T = timestamp.FromTime(now)
 
-			// We need to set the timestamp for expected exemplars that does not have a timestamp.
-			for i := range test.exemplars {
-				if test.exemplars[i].Ts == 0 {
-					test.exemplars[i].Ts = timestamp.FromTime(now)
+				// We need to set the timestamp for expected exemplars that do not have a timestamp.
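+				// (Exemplars parsed without an explicit timestamp are stamped with the scrape timestamp on append, hence this fix-up.)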
+				for j := range test.samples[i].ES {
+					if test.samples[i].ES[j].Ts == 0 {
+						test.samples[i].ES[j].Ts = timestamp.FromTime(now)
+					}
 				}
 			}
@@ -3018,12 +2884,10 @@ metric: <
 				buf.WriteString(test.scrapeText)
 			}
 
-			_, _, _, err := sl.append(app, buf.Bytes(), test.contentType, now)
+			_, _, _, err := app.append(buf.Bytes(), test.contentType, now)
 			require.NoError(t, err)
 			require.NoError(t, app.Commit())
-			requireEqual(t, test.floats, app.resultFloats)
-			requireEqual(t, test.histograms, app.resultHistograms)
-			requireEqual(t, test.exemplars, app.resultExemplars)
+			requireEqual(t, test.samples, appTest.ResultSamples())
 		})
 	}
 }
@@ -3052,152 +2916,136 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
 	scrapeText := []string{`metric_total{n="1"} 1 # {t="1"} 1.0 10000
 # EOF`, `metric_total{n="1"} 2 # {t="2"} 2.0 20000
 # EOF`}
-	samples := []floatSample{{
-		metric: labels.FromStrings("__name__", "metric_total", "n", "1"),
-		f:      1,
+	samples := []sample{{
+		L: labels.FromStrings("__name__", "metric_total", "n", "1"),
+		V: 1,
+		ES: []exemplar.Exemplar{
+			{Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true},
+		},
 	}, {
-		metric: labels.FromStrings("__name__", "metric_total", "n", "1"),
-		f:      2,
+		L: labels.FromStrings("__name__", "metric_total", "n", "1"),
+		V: 2,
+		ES: []exemplar.Exemplar{
+			{Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true},
+		},
 	}}
-	exemplars := []exemplar.Exemplar{
-		{Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true},
-		{Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true},
-	}
 	discoveryLabels := &Target{
 		labels: labels.FromStrings(),
 	}
 
-	app := &collectResultAppender{}
-
-	sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
-	sl.sampleMutator = func(l labels.Labels) labels.Labels {
-		return mutateSampleLabels(l, discoveryLabels, false, nil)
-	}
-	sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
-		return mutateReportSampleLabels(l, discoveryLabels)
-	}
+	appTest := teststorage.NewAppendable()
+	sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+		sl.appendable = appTest
+		sl.sampleMutator = func(l labels.Labels) labels.Labels {
+			return mutateSampleLabels(l, discoveryLabels, false, nil)
+		}
+		sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
+			return mutateReportSampleLabels(l, discoveryLabels)
+		}
+		// This test does not care about metadata. Having this true would mean we need to add metadata to sample
+		// expectations.
+		sl.appendMetadataToWAL = false
+	})
 
 	now := time.Now()
-
 	for i := range samples {
 		ts := now.Add(time.Second * time.Duration(i))
-		samples[i].t = timestamp.FromTime(ts)
-	}
-
-	// We need to set the timestamp for expected exemplars that does not have a timestamp.
- for i := range exemplars { - if exemplars[i].Ts == 0 { - ts := now.Add(time.Second * time.Duration(i)) - exemplars[i].Ts = timestamp.FromTime(ts) - } + samples[i].T = timestamp.FromTime(ts) } for i, st := range scrapeText { - _, _, _, err := sl.append(app, []byte(st), "application/openmetrics-text", timestamp.Time(samples[i].t)) + app := sl.appender() + _, _, _, err := app.append([]byte(st), "application/openmetrics-text", timestamp.Time(samples[i].T)) require.NoError(t, err) require.NoError(t, app.Commit()) } - requireEqual(t, samples, app.resultFloats) - requireEqual(t, exemplars, app.resultExemplars) + requireEqual(t, samples, appTest.ResultSamples()) } func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) { - var ( - scraper = &testScraper{} - appender = &collectResultAppender{} - app = func(context.Context) storage.Appender { return appender } - ) - - ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) - + ctx, cancel := context.WithCancel(t.Context()) + appTest := teststorage.NewAppendable() + sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) { + sl.ctx = ctx + sl.appendable = appTest + }) scraper.scrapeFunc = func(context.Context, io.Writer) error { cancel() return errors.New("scrape failed") } sl.run(nil) - require.Equal(t, 0.0, appender.resultFloats[0].f, "bad 'up' value") + require.Equal(t, 0.0, appTest.ResultSamples()[0].V, "bad 'up' value") } func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) { - var ( - scraper = &testScraper{} - appender = &collectResultAppender{} - app = func(context.Context) storage.Appender { return appender } - ) - - ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) - + ctx, cancel := context.WithCancel(t.Context()) + appTest := teststorage.NewAppendable() + sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) { + sl.ctx = ctx + sl.appendable = appTest + }) scraper.scrapeFunc = func(_ context.Context, w io.Writer) error { cancel() - w.Write([]byte("a{l=\"\xff\"} 1\n")) + _, _ = w.Write([]byte("a{l=\"\xff\"} 1\n")) return nil } sl.run(nil) - require.Equal(t, 0.0, appender.resultFloats[0].f, "bad 'up' value") -} - -type errorAppender struct { - collectResultAppender -} - -func (app *errorAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - switch lset.Get(model.MetricNameLabel) { - case "out_of_order": - return 0, storage.ErrOutOfOrderSample - case "amend": - return 0, storage.ErrDuplicateSampleForTimestamp - case "out_of_bounds": - return 0, storage.ErrOutOfBounds - default: - return app.collectResultAppender.Append(ref, lset, t, v) - } + require.Equal(t, 0.0, appTest.ResultSamples()[0].V, "bad 'up' value") } func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T) { - app := &errorAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0) + appTest := teststorage.NewAppendable().WithErrs( + func(ls labels.Labels) error { + switch ls.Get(model.MetricNameLabel) { + case "out_of_order": + return storage.ErrOutOfOrderSample + case "amend": + return storage.ErrDuplicateSampleForTimestamp + case "out_of_bounds": + return storage.ErrOutOfBounds + default: + return nil + } + }, nil, nil) + sl, _ := newTestScrapeLoop(t, withAppendable(appTest)) now := time.Unix(1, 0) - slApp := sl.appender(context.Background()) - total, added, 
seriesAdded, err := sl.append(slApp, []byte("out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n"), "text/plain", now) + app := sl.appender() + total, added, seriesAdded, err := app.append([]byte("out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n"), "text/plain", now) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) - want := []floatSample{ + want := []sample{ { - metric: labels.FromStrings(model.MetricNameLabel, "normal"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "normal"), + T: timestamp.FromTime(now), + V: 1, }, } - requireEqual(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender) + requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest) require.Equal(t, 4, total) require.Equal(t, 4, added) require.Equal(t, 1, seriesAdded) } func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { - app := &collectResultAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, - func(context.Context) storage.Appender { + sl, _ := newTestScrapeLoop(t, withAppendable( + appendableFunc(func(ctx context.Context) storage.Appender { return &timeLimitAppender{ - Appender: app, + Appender: teststorage.NewAppendable().Appender(ctx), maxTime: timestamp.FromTime(time.Now().Add(10 * time.Minute)), } - }, - 0, - ) + }), + )) now := time.Now().Add(20 * time.Minute) - slApp := sl.appender(context.Background()) - total, added, seriesAdded, err := sl.append(slApp, []byte("normal 1\n"), "text/plain", now) + app := sl.appender() + total, added, seriesAdded, err := app.append([]byte("normal 1\n"), "text/plain", now) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) require.Equal(t, 1, total) require.Equal(t, 1, added) require.Equal(t, 0, seriesAdded) @@ -3292,7 +3140,7 @@ func TestRequestTraceparentHeader(t *testing.T) { resp, err := ts.scrape(context.Background()) require.NoError(t, err) require.NotNil(t, resp) - defer resp.Body.Close() + t.Cleanup(func() { _ = resp.Body.Close() }) } func TestTargetScraperScrapeOK(t *testing.T) { @@ -3339,7 +3187,7 @@ func TestTargetScraperScrapeOK(t *testing.T) { } else { w.Header().Set("Content-Type", `text/plain; version=0.0.4`) } - w.Write([]byte("metric_a 1\nmetric_b 2\n")) + _, _ = w.Write([]byte("metric_a 1\nmetric_b 2\n")) }), ) defer server.Close() @@ -3454,9 +3302,9 @@ func TestTargetScrapeScrapeCancel(t *testing.T) { _, err := ts.scrape(ctx) switch { case err == nil: - errc <- errors.New("Expected error but got nil") + errc <- errors.New("expected error but got nil") case !errors.Is(ctx.Err(), context.Canceled): - errc <- fmt.Errorf("Expected context cancellation error but got: %w", ctx.Err()) + errc <- fmt.Errorf("expected context cancellation error but got: %w", ctx.Err()) default: close(errc) } @@ -3516,11 +3364,11 @@ func TestTargetScraperBodySizeLimit(t *testing.T) { if gzipResponse { w.Header().Set("Content-Encoding", "gzip") gw := gzip.NewWriter(w) - defer gw.Close() - gw.Write([]byte(responseBody)) + defer func() { _ = gw.Close() }() + _, _ = gw.Write([]byte(responseBody)) return } - w.Write([]byte(responseBody)) + _, _ = w.Write([]byte(responseBody)) }), ) defer server.Close() @@ -3614,87 +3462,84 @@ func (ts *testScraper) readResponse(ctx context.Context, _ *http.Response, w io. 
func TestScrapeLoop_RespectTimestamps(t *testing.T) { s := teststorage.New(t) - defer s.Close() + t.Cleanup(func() { _ = s.Close() }) - app := s.Appender(context.Background()) - capp := &collectResultAppender{next: app} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return capp }, 0) + appTest := teststorage.NewAppendable().Then(s) + sl, _ := newTestScrapeLoop(t, withAppendable(appTest)) now := time.Now() - slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "text/plain", now) + app := sl.appender() + _, _, _, err := app.append([]byte(`metric_a{a="1",b="1"} 1 0`), "text/plain", now) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) - want := []floatSample{ + want := []sample{ { - metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), - t: 0, - f: 1, + L: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), + T: 0, + V: 1, }, } - require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender) + require.Equal(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest) } func TestScrapeLoop_DiscardTimestamps(t *testing.T) { s := teststorage.New(t) - defer s.Close() - - app := s.Appender(context.Background()) - - capp := &collectResultAppender{next: app} + t.Cleanup(func() { _ = s.Close() }) - sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return capp }, 0) - sl.honorTimestamps = false + appTest := teststorage.NewAppendable().Then(s) + sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) { + sl.appendable = appTest + sl.honorTimestamps = false + }) now := time.Now() - slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "text/plain", now) + app := sl.appender() + _, _, _, err := app.append([]byte(`metric_a{a="1",b="1"} 1 0`), "text/plain", now) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) - want := []floatSample{ + want := []sample{ { - metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), + T: timestamp.FromTime(now), + V: 1, }, } - require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender) + require.Equal(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest) } func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { s := teststorage.New(t) - defer s.Close() + t.Cleanup(func() { _ = s.Close() }) - ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0) - defer cancel() + appTest := teststorage.NewAppendable().Then(s) + sl, _ := newTestScrapeLoop(t, withAppendable(appTest)) // We add a good and a bad metric to check that both are discarded. - slApp := sl.appender(ctx) - _, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "text/plain", time.Time{}) + app := sl.appender() + _, _, _, err := app.append([]byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "text/plain", time.Time{}) require.Error(t, err) - require.NoError(t, slApp.Rollback()) - // We need to cycle staleness cache maps after a manual rollback. 
Otherwise they will have old entries in them, + require.NoError(t, app.Rollback()) + // We need to cycle staleness cache maps after a manual rollback. Otherwise, they will have old entries in them, // which would cause ErrDuplicateSampleForTimestamp errors on the next append. sl.cache.iterDone(true) q, err := s.Querier(time.Time{}.UnixNano(), 0) require.NoError(t, err) - series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) + series := q.Select(sl.ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) require.False(t, series.Next(), "series found in tsdb") require.NoError(t, series.Err()) // We add a good metric to check that it is recorded. - slApp = sl.appender(ctx) - _, _, _, err = sl.append(slApp, []byte("test_metric{le=\"500\"} 1\n"), "text/plain", time.Time{}) + app = sl.appender() + _, _, _, err = app.append([]byte("test_metric{le=\"500\"} 1\n"), "text/plain", time.Time{}) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) q, err = s.Querier(time.Time{}.UnixNano(), 0) require.NoError(t, err) - series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500")) + series = q.Select(sl.ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500")) require.True(t, series.Next(), "series not found in tsdb") require.NoError(t, series.Err()) require.False(t, series.Next(), "more than one series found in tsdb") @@ -3702,29 +3547,28 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { s := teststorage.New(t) - defer s.Close() - - app := s.Appender(context.Background()) + t.Cleanup(func() { _ = s.Close() }) - ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, context.Background(), &testScraper{}, func(context.Context) storage.Appender { return app }, 0) - sl.sampleMutator = func(l labels.Labels) labels.Labels { - if l.Has("drop") { - return labels.FromStrings("no", "name") // This label set will trigger an error. + appTest := teststorage.NewAppendable().Then(s) + sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) { + sl.appendable = appTest + sl.sampleMutator = func(l labels.Labels) labels.Labels { + if l.Has("drop") { + return labels.FromStrings("no", "name") // This label set will trigger an error. 
+ } + return l } - return l - } - defer cancel() + }) - slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte("nok 1\nnok2{drop=\"drop\"} 1\n"), "text/plain", time.Time{}) + app := sl.appender() + _, _, _, err := app.append([]byte("nok 1\nnok2{drop=\"drop\"} 1\n"), "text/plain", time.Time{}) require.Error(t, err) - require.NoError(t, slApp.Rollback()) + require.NoError(t, app.Rollback()) require.Equal(t, errNameLabelMandatory, err) q, err := s.Querier(time.Time{}.UnixNano(), 0) require.NoError(t, err) - series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) + series := q.Select(sl.ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) require.False(t, series.Next(), "series found in tsdb") require.NoError(t, series.Err()) } @@ -3798,7 +3642,7 @@ func TestReusableConfig(t *testing.T) { func TestReuseScrapeCache(t *testing.T) { var ( - app = &nopAppendable{} + app = teststorage.NewAppendable() cfg = &config.ScrapeConfig{ JobName: "Prometheus", ScrapeTimeout: model.Duration(5 * time.Second), @@ -3964,7 +3808,7 @@ func TestReuseScrapeCache(t *testing.T) { for i, s := range steps { initCacheAddr := cacheAddr(sp) - sp.reload(s.newConfig) + require.NoError(t, sp.reload(s.newConfig)) for fp, newCacheAddr := range cacheAddr(sp) { if s.keep { require.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: old cache and new cache are not the same", i) @@ -3973,7 +3817,7 @@ func TestReuseScrapeCache(t *testing.T) { } } initCacheAddr = cacheAddr(sp) - sp.reload(s.newConfig) + require.NoError(t, sp.reload(s.newConfig)) for fp, newCacheAddr := range cacheAddr(sp) { require.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: reloading the exact config invalidates the cache", i) } @@ -3982,16 +3826,14 @@ func TestReuseScrapeCache(t *testing.T) { func TestScrapeAddFast(t *testing.T) { s := teststorage.New(t) - defer s.Close() + t.Cleanup(func() { _ = s.Close() }) - ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0) - defer cancel() + sl, _ := newTestScrapeLoop(t, withAppendable(s)) - slApp := sl.appender(ctx) - _, _, _, err := sl.append(slApp, []byte("up 1\n"), "text/plain", time.Time{}) + app := sl.appender() + _, _, _, err := app.append([]byte("up 1\n"), "text/plain", time.Time{}) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) // Poison the cache. There is just one entry, and one series in the // storage. Changing the ref will create a 'not found' error. 
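+ // The follow-up append below must still succeed: the scrape loop should recover from the stale ref (presumably by falling back to a full add) rather than fail.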
@@ -3999,15 +3841,14 @@ func TestScrapeAddFast(t *testing.T) { v.ref++ } - slApp = sl.appender(ctx) - _, _, _, err = sl.append(slApp, []byte("up 1\n"), "text/plain", time.Time{}.Add(time.Second)) + app = sl.appender() + _, _, _, err = app.append([]byte("up 1\n"), "text/plain", time.Time{}.Add(time.Second)) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) } func TestReuseCacheRace(t *testing.T) { var ( - app = &nopAppendable{} cfg = &config.ScrapeConfig{ JobName: "Prometheus", ScrapeTimeout: model.Duration(5 * time.Second), @@ -4017,7 +3858,7 @@ func TestReuseCacheRace(t *testing.T) { MetricNameEscapingScheme: model.AllowUTF8, } buffers = pool.New(1e3, 100e6, 3, func(sz int) any { return make([]byte, 0, sz) }) - sp, _ = newScrapePool(cfg, app, 0, nil, buffers, &Options{}, newTestScrapeMetrics(t)) + sp, _ = newScrapePool(cfg, teststorage.NewAppendable(), 0, nil, buffers, &Options{}, newTestScrapeMetrics(t)) t1 = &Target{ labels: labels.FromStrings("labelNew", "nameNew"), scrapeConfig: &config.ScrapeConfig{}, @@ -4031,7 +3872,7 @@ func TestReuseCacheRace(t *testing.T) { if time.Since(start) > 5*time.Second { break } - sp.reload(&config.ScrapeConfig{ + require.NoError(t, sp.reload(&config.ScrapeConfig{ JobName: "Prometheus", ScrapeTimeout: model.Duration(1 * time.Millisecond), ScrapeInterval: model.Duration(1 * time.Millisecond), @@ -4039,39 +3880,42 @@ func TestReuseCacheRace(t *testing.T) { SampleLimit: i, MetricNameValidationScheme: model.UTF8Validation, MetricNameEscapingScheme: model.AllowUTF8, - }) + })) } } func TestCheckAddError(t *testing.T) { var appErrs appendErrors - sl := scrapeLoop{l: promslog.NewNopLogger(), metrics: newTestScrapeMetrics(t)} - sl.checkAddError(nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs) + sl, _ := newTestScrapeLoop(t) + // TODO: Check err etc + _, _ = sl.checkAddError(nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs) require.Equal(t, 1, appErrs.numOutOfOrder) + + // TODO(bwplotka): Test partial error check and other cases } func TestScrapeReportSingleAppender(t *testing.T) { t.Parallel() s := teststorage.New(t) - defer s.Close() + t.Cleanup(func() { _ = s.Close() }) - var ( - signal = make(chan struct{}, 1) - scraper = &testScraper{} - ) + signal := make(chan struct{}, 1) - ctx, cancel := context.WithCancel(context.Background()) - // Since we're writing samples directly below we need to provide a protocol fallback. - sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, s.Appender, 10*time.Millisecond, "text/plain") + ctx, cancel := context.WithCancel(t.Context()) + sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) { + sl.ctx = ctx + sl.appendable = s + // Since we're writing samples directly below we need to provide a protocol fallback. 
+ sl.fallbackScrapeProtocol = "text/plain" + }) numScrapes := 0 - scraper.scrapeFunc = func(_ context.Context, w io.Writer) error { numScrapes++ if numScrapes%4 == 0 { return errors.New("scrape failed") } - w.Write([]byte("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n")) + _, _ = w.Write([]byte("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n")) return nil } @@ -4095,7 +3939,7 @@ func TestScrapeReportSingleAppender(t *testing.T) { } require.Equal(t, 0, c%9, "Appended samples not as expected: %d", c) - q.Close() + require.NoError(t, q.Close()) } cancel() @@ -4108,7 +3952,7 @@ func TestScrapeReportSingleAppender(t *testing.T) { func TestScrapeReportLimit(t *testing.T) { s := teststorage.New(t) - defer s.Close() + t.Cleanup(func() { _ = s.Close() }) cfg := &config.ScrapeConfig{ JobName: "test", @@ -4146,7 +3990,7 @@ func TestScrapeReportLimit(t *testing.T) { ctx := t.Context() q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) require.NoError(t, err) - defer q.Close() + t.Cleanup(func() { _ = q.Close() }) series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "up")) var found bool @@ -4164,7 +4008,7 @@ func TestScrapeReportLimit(t *testing.T) { func TestScrapeUTF8(t *testing.T) { s := teststorage.New(t) - defer s.Close() + t.Cleanup(func() { _ = s.Close() }) cfg := &config.ScrapeConfig{ JobName: "test", @@ -4200,7 +4044,7 @@ func TestScrapeUTF8(t *testing.T) { ctx := t.Context() q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) require.NoError(t, err) - defer q.Close() + t.Cleanup(func() { _ = q.Close() }) series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "with.dots")) require.True(t, series.Next(), "series not found in tsdb") @@ -4272,30 +4116,29 @@ func TestScrapeLoopLabelLimit(t *testing.T) { } for _, test := range tests { - app := &collectResultAppender{} - discoveryLabels := &Target{ labels: labels.FromStrings(test.discoveryLabels...), } - sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0) - sl.sampleMutator = func(l labels.Labels) labels.Labels { - return mutateSampleLabels(l, discoveryLabels, false, nil) - } - sl.reportSampleMutator = func(l labels.Labels) labels.Labels { - return mutateReportSampleLabels(l, discoveryLabels) - } - sl.labelLimits = &test.labelLimits + sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) { + sl.sampleMutator = func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, discoveryLabels, false, nil) + } + sl.reportSampleMutator = func(l labels.Labels) labels.Labels { + return mutateReportSampleLabels(l, discoveryLabels) + } + sl.labelLimits = &test.labelLimits + }) - slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "text/plain", time.Now()) + app := sl.appender() + _, _, _, err := app.append([]byte(test.scrapeLabels), "text/plain", time.Now()) t.Logf("Test:%s", test.title) if test.expectErr { require.Error(t, err) } else { require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) } } } @@ -4303,7 +4146,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) { func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) { interval, _ := model.ParseDuration("2s") timeout, _ := model.ParseDuration("500ms") - config := &config.ScrapeConfig{ + cfg := &config.ScrapeConfig{ ScrapeInterval: interval, ScrapeTimeout: timeout, MetricNameValidationScheme: model.UTF8Validation, @@ -4327,7 +4170,7 
@@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) { }, }, } - sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) + sp, _ := newScrapePool(cfg, teststorage.NewAppendable(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) tgts := []*targetgroup.Group{ { Targets: []model.LabelSet{{model.AddressLabel: "127.0.0.1:9090"}}, @@ -4343,10 +4186,10 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) { // Testing whether we can remove trailing .0 from histogram 'le' and summary 'quantile' labels. func TestLeQuantileReLabel(t *testing.T) { - simpleStorage := teststorage.New(t) - defer simpleStorage.Close() + s := teststorage.New(t) + t.Cleanup(func() { _ = s.Close() }) - config := &config.ScrapeConfig{ + cfg := &config.ScrapeConfig{ JobName: "test", MetricRelabelConfigs: []*relabel.Config{ { @@ -4413,7 +4256,7 @@ test_summary_count 199 ts, scrapedTwice := newScrapableServer(metricsText) defer ts.Close() - sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) + sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) require.NoError(t, err) defer sp.stop() @@ -4433,9 +4276,9 @@ test_summary_count 199 } ctx := t.Context() - q, err := simpleStorage.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) + q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) require.NoError(t, err) - defer q.Close() + t.Cleanup(func() { _ = q.Close() }) checkValues := func(labelName string, expectedValues []string, series storage.SeriesSet) { foundLeValues := map[string]bool{} @@ -4463,30 +4306,22 @@ test_summary_count 199 // Testing whether we can automatically convert scraped classic histograms into native histograms with custom buckets. 
func TestConvertClassicHistogramsToNHCB(t *testing.T) { t.Parallel() - genTestCounterText := func(name string, value int, withMetadata bool) string { - if withMetadata { - return fmt.Sprintf(` + + genTestCounterText := func(name string) string { + return fmt.Sprintf(` # HELP %s some help text # TYPE %s counter -%s{address="0.0.0.0",port="5001"} %d -`, name, name, name, value) - } - return fmt.Sprintf(` -%s %d -`, name, value) +%s{address="0.0.0.0",port="5001"} 1 +`, name, name, name) } - genTestHistText := func(name string, withMetadata bool) string { + genTestHistText := func(name string) string { data := map[string]any{ "name": name, } b := &bytes.Buffer{} - if withMetadata { - template.Must(template.New("").Parse(` + require.NoError(t, template.Must(template.New("").Parse(` # HELP {{.name}} This is a histogram with default buckets # TYPE {{.name}} histogram -`)).Execute(b, data) - } - template.Must(template.New("").Parse(` {{.name}}_bucket{address="0.0.0.0",port="5001",le="0.005"} 0 {{.name}}_bucket{address="0.0.0.0",port="5001",le="0.01"} 0 {{.name}}_bucket{address="0.0.0.0",port="5001",le="0.025"} 0 @@ -4501,10 +4336,10 @@ func TestConvertClassicHistogramsToNHCB(t *testing.T) { {{.name}}_bucket{address="0.0.0.0",port="5001",le="+Inf"} 1 {{.name}}_sum{address="0.0.0.0",port="5001"} 10 {{.name}}_count{address="0.0.0.0",port="5001"} 1 -`)).Execute(b, data) +`)).Execute(b, data)) return b.String() } - genTestCounterProto := func(name string, value int) string { + genTestCounterProto := func(name string) string { return fmt.Sprintf(` name: "%s" help: "some help text" @@ -4522,7 +4357,7 @@ metric: < value: %d > > -`, name, value) +`, name, 1) } genTestHistProto := func(name string, hasClassic, hasExponential bool) string { var classic string @@ -4616,60 +4451,60 @@ metric: < }{ "text": { text: []string{ - genTestCounterText("test_metric_1", 1, true), - genTestCounterText("test_metric_1_count", 1, true), - genTestCounterText("test_metric_1_sum", 1, true), - genTestCounterText("test_metric_1_bucket", 1, true), - genTestHistText("test_histogram_1", true), - genTestCounterText("test_metric_2", 1, true), - genTestCounterText("test_metric_2_count", 1, true), - genTestCounterText("test_metric_2_sum", 1, true), - genTestCounterText("test_metric_2_bucket", 1, true), - genTestHistText("test_histogram_2", true), - genTestCounterText("test_metric_3", 1, true), - genTestCounterText("test_metric_3_count", 1, true), - genTestCounterText("test_metric_3_sum", 1, true), - genTestCounterText("test_metric_3_bucket", 1, true), - genTestHistText("test_histogram_3", true), + genTestCounterText("test_metric_1"), + genTestCounterText("test_metric_1_count"), + genTestCounterText("test_metric_1_sum"), + genTestCounterText("test_metric_1_bucket"), + genTestHistText("test_histogram_1"), + genTestCounterText("test_metric_2"), + genTestCounterText("test_metric_2_count"), + genTestCounterText("test_metric_2_sum"), + genTestCounterText("test_metric_2_bucket"), + genTestHistText("test_histogram_2"), + genTestCounterText("test_metric_3"), + genTestCounterText("test_metric_3_count"), + genTestCounterText("test_metric_3_sum"), + genTestCounterText("test_metric_3_bucket"), + genTestHistText("test_histogram_3"), }, hasClassic: true, }, "text, in different order": { text: []string{ - genTestCounterText("test_metric_1", 1, true), - genTestCounterText("test_metric_1_count", 1, true), - genTestCounterText("test_metric_1_sum", 1, true), - genTestCounterText("test_metric_1_bucket", 1, true), - genTestHistText("test_histogram_1", 
true), - genTestCounterText("test_metric_2", 1, true), - genTestCounterText("test_metric_2_count", 1, true), - genTestCounterText("test_metric_2_sum", 1, true), - genTestCounterText("test_metric_2_bucket", 1, true), - genTestHistText("test_histogram_2", true), - genTestHistText("test_histogram_3", true), - genTestCounterText("test_metric_3", 1, true), - genTestCounterText("test_metric_3_count", 1, true), - genTestCounterText("test_metric_3_sum", 1, true), - genTestCounterText("test_metric_3_bucket", 1, true), + genTestCounterText("test_metric_1"), + genTestCounterText("test_metric_1_count"), + genTestCounterText("test_metric_1_sum"), + genTestCounterText("test_metric_1_bucket"), + genTestHistText("test_histogram_1"), + genTestCounterText("test_metric_2"), + genTestCounterText("test_metric_2_count"), + genTestCounterText("test_metric_2_sum"), + genTestCounterText("test_metric_2_bucket"), + genTestHistText("test_histogram_2"), + genTestHistText("test_histogram_3"), + genTestCounterText("test_metric_3"), + genTestCounterText("test_metric_3_count"), + genTestCounterText("test_metric_3_sum"), + genTestCounterText("test_metric_3_bucket"), }, hasClassic: true, }, "protobuf": { text: []string{ - genTestCounterProto("test_metric_1", 1), - genTestCounterProto("test_metric_1_count", 1), - genTestCounterProto("test_metric_1_sum", 1), - genTestCounterProto("test_metric_1_bucket", 1), + genTestCounterProto("test_metric_1"), + genTestCounterProto("test_metric_1_count"), + genTestCounterProto("test_metric_1_sum"), + genTestCounterProto("test_metric_1_bucket"), genTestHistProto("test_histogram_1", true, false), - genTestCounterProto("test_metric_2", 1), - genTestCounterProto("test_metric_2_count", 1), - genTestCounterProto("test_metric_2_sum", 1), - genTestCounterProto("test_metric_2_bucket", 1), + genTestCounterProto("test_metric_2"), + genTestCounterProto("test_metric_2_count"), + genTestCounterProto("test_metric_2_sum"), + genTestCounterProto("test_metric_2_bucket"), genTestHistProto("test_histogram_2", true, false), - genTestCounterProto("test_metric_3", 1), - genTestCounterProto("test_metric_3_count", 1), - genTestCounterProto("test_metric_3_sum", 1), - genTestCounterProto("test_metric_3_bucket", 1), + genTestCounterProto("test_metric_3"), + genTestCounterProto("test_metric_3_count"), + genTestCounterProto("test_metric_3_sum"), + genTestCounterProto("test_metric_3_bucket"), genTestHistProto("test_histogram_3", true, false), }, contentType: "application/vnd.google.protobuf", @@ -4678,40 +4513,40 @@ metric: < "protobuf, in different order": { text: []string{ genTestHistProto("test_histogram_1", true, false), - genTestCounterProto("test_metric_1", 1), - genTestCounterProto("test_metric_1_count", 1), - genTestCounterProto("test_metric_1_sum", 1), - genTestCounterProto("test_metric_1_bucket", 1), + genTestCounterProto("test_metric_1"), + genTestCounterProto("test_metric_1_count"), + genTestCounterProto("test_metric_1_sum"), + genTestCounterProto("test_metric_1_bucket"), genTestHistProto("test_histogram_2", true, false), - genTestCounterProto("test_metric_2", 1), - genTestCounterProto("test_metric_2_count", 1), - genTestCounterProto("test_metric_2_sum", 1), - genTestCounterProto("test_metric_2_bucket", 1), + genTestCounterProto("test_metric_2"), + genTestCounterProto("test_metric_2_count"), + genTestCounterProto("test_metric_2_sum"), + genTestCounterProto("test_metric_2_bucket"), genTestHistProto("test_histogram_3", true, false), - genTestCounterProto("test_metric_3", 1), - 
genTestCounterProto("test_metric_3_count", 1), - genTestCounterProto("test_metric_3_sum", 1), - genTestCounterProto("test_metric_3_bucket", 1), + genTestCounterProto("test_metric_3"), + genTestCounterProto("test_metric_3_count"), + genTestCounterProto("test_metric_3_sum"), + genTestCounterProto("test_metric_3_bucket"), }, contentType: "application/vnd.google.protobuf", hasClassic: true, }, "protobuf, with additional native exponential histogram": { text: []string{ - genTestCounterProto("test_metric_1", 1), - genTestCounterProto("test_metric_1_count", 1), - genTestCounterProto("test_metric_1_sum", 1), - genTestCounterProto("test_metric_1_bucket", 1), + genTestCounterProto("test_metric_1"), + genTestCounterProto("test_metric_1_count"), + genTestCounterProto("test_metric_1_sum"), + genTestCounterProto("test_metric_1_bucket"), genTestHistProto("test_histogram_1", true, true), - genTestCounterProto("test_metric_2", 1), - genTestCounterProto("test_metric_2_count", 1), - genTestCounterProto("test_metric_2_sum", 1), - genTestCounterProto("test_metric_2_bucket", 1), + genTestCounterProto("test_metric_2"), + genTestCounterProto("test_metric_2_count"), + genTestCounterProto("test_metric_2_sum"), + genTestCounterProto("test_metric_2_bucket"), genTestHistProto("test_histogram_2", true, true), - genTestCounterProto("test_metric_3", 1), - genTestCounterProto("test_metric_3_count", 1), - genTestCounterProto("test_metric_3_sum", 1), - genTestCounterProto("test_metric_3_bucket", 1), + genTestCounterProto("test_metric_3"), + genTestCounterProto("test_metric_3_count"), + genTestCounterProto("test_metric_3_sum"), + genTestCounterProto("test_metric_3_bucket"), genTestHistProto("test_histogram_3", true, true), }, contentType: "application/vnd.google.protobuf", @@ -4720,20 +4555,20 @@ metric: < }, "protobuf, with only native exponential histogram": { text: []string{ - genTestCounterProto("test_metric_1", 1), - genTestCounterProto("test_metric_1_count", 1), - genTestCounterProto("test_metric_1_sum", 1), - genTestCounterProto("test_metric_1_bucket", 1), + genTestCounterProto("test_metric_1"), + genTestCounterProto("test_metric_1_count"), + genTestCounterProto("test_metric_1_sum"), + genTestCounterProto("test_metric_1_bucket"), genTestHistProto("test_histogram_1", false, true), - genTestCounterProto("test_metric_2", 1), - genTestCounterProto("test_metric_2_count", 1), - genTestCounterProto("test_metric_2_sum", 1), - genTestCounterProto("test_metric_2_bucket", 1), + genTestCounterProto("test_metric_2"), + genTestCounterProto("test_metric_2_count"), + genTestCounterProto("test_metric_2_sum"), + genTestCounterProto("test_metric_2_bucket"), genTestHistProto("test_histogram_2", false, true), - genTestCounterProto("test_metric_3", 1), - genTestCounterProto("test_metric_3_count", 1), - genTestCounterProto("test_metric_3_sum", 1), - genTestCounterProto("test_metric_3_bucket", 1), + genTestCounterProto("test_metric_3"), + genTestCounterProto("test_metric_3_count"), + genTestCounterProto("test_metric_3_sum"), + genTestCounterProto("test_metric_3_bucket"), genTestHistProto("test_histogram_3", false, true), }, contentType: "application/vnd.google.protobuf", @@ -4741,7 +4576,7 @@ metric: < }, } - checkBucketValues := func(expectedCount int, series storage.SeriesSet) { + checkBucketValues := func(t testing.TB, expectedCount int, series storage.SeriesSet) { labelName := "le" var expectedValues []string if expectedCount > 0 { @@ -4763,7 +4598,7 @@ metric: < } // Checks that the expected series is present and runs a basic sanity 
check of the float values. - checkFloatSeries := func(series storage.SeriesSet, expectedCount int, expectedFloat float64) { + checkFloatSeries := func(t testing.TB, series storage.SeriesSet, expectedCount int, expectedFloat float64) { count := 0 for series.Next() { i := series.At().Iterator(nil) @@ -4789,7 +4624,7 @@ metric: < } // Checks that the expected series is present and runs a basic sanity check of the histogram values. - checkHistSeries := func(series storage.SeriesSet, expectedCount int, expectedSchema int32) { + checkHistSeries := func(t testing.TB, series storage.SeriesSet, expectedCount int, expectedSchema int32) { count := 0 for series.Next() { i := series.At().Iterator(nil) @@ -4871,14 +4706,15 @@ metric: < t.Run(fmt.Sprintf("%s with %s", name, metricsTextName), func(t *testing.T) { t.Parallel() - simpleStorage := teststorage.New(t) - defer simpleStorage.Close() - - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return simpleStorage.Appender(ctx) }, 0) - sl.alwaysScrapeClassicHist = tc.alwaysScrapeClassicHistograms - sl.convertClassicHistToNHCB = tc.convertClassicHistToNHCB - sl.enableNativeHistogramScraping = true - app := simpleStorage.Appender(context.Background()) + s := teststorage.New(t) + t.Cleanup(func() { _ = s.Close() }) + + sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) { + sl.appendable = s + sl.alwaysScrapeClassicHist = tc.alwaysScrapeClassicHistograms + sl.convertClassicHistToNHCB = tc.convertClassicHistToNHCB + sl.enableNativeHistogramScraping = true + }) var content []byte contentType := metricsText.contentType @@ -4902,47 +4738,50 @@ metric: < default: t.Error("unexpected content type") } - sl.append(app, content, contentType, time.Now()) + now := time.Now() + app := sl.appender() + _, _, _, err := app.append(content, contentType, now) + require.NoError(t, err) require.NoError(t, app.Commit()) + var expectedSchema int32 + if expectCustomBuckets { + expectedSchema = histogram.CustomBucketsSchema + } else { + expectedSchema = 3 + } + + // Validate that what was appended can be queried. ctx := t.Context() - q, err := simpleStorage.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) + q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) require.NoError(t, err) - defer q.Close() + t.Cleanup(func() { _ = q.Close() }) var series storage.SeriesSet - for i := 1; i <= 3; i++ { series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_metric_%d", i))) - checkFloatSeries(series, 1, 1.) + checkFloatSeries(t, series, 1, 1.) series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_metric_%d_count", i))) - checkFloatSeries(series, 1, 1.) + checkFloatSeries(t, series, 1, 1.) series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_metric_%d_sum", i))) - checkFloatSeries(series, 1, 1.) + checkFloatSeries(t, series, 1, 1.) series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_metric_%d_bucket", i))) - checkFloatSeries(series, 1, 1.) + checkFloatSeries(t, series, 1, 1.) series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d_count", i))) - checkFloatSeries(series, expectedClassicHistCount, 1.) + checkFloatSeries(t, series, expectedClassicHistCount, 1.)
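+ // Note: for the native-only cases expectedClassicHistCount is expected to be zero, so these checks also verify that the classic series are absent.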
series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d_sum", i))) - checkFloatSeries(series, expectedClassicHistCount, 10.) + checkFloatSeries(t, series, expectedClassicHistCount, 10.) series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d_bucket", i))) - checkBucketValues(expectedClassicHistCount, series) + checkBucketValues(t, expectedClassicHistCount, series) series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d", i))) - - var expectedSchema int32 - if expectCustomBuckets { - expectedSchema = histogram.CustomBucketsSchema - } else { - expectedSchema = 3 - } - checkHistSeries(series, expectedNativeHistCount, expectedSchema) + checkHistSeries(t, series, expectedNativeHistCount, expectedSchema) } }) } @@ -4950,10 +4789,10 @@ metric: < } func TestTypeUnitReLabel(t *testing.T) { - simpleStorage := teststorage.New(t) - defer simpleStorage.Close() + s := teststorage.New(t) + t.Cleanup(func() { _ = s.Close() }) - config := &config.ScrapeConfig{ + cfg := &config.ScrapeConfig{ JobName: "test", MetricRelabelConfigs: []*relabel.Config{ { @@ -4998,7 +4837,7 @@ disk_usage_bytes 456 ts, scrapedTwice := newScrapableServer(metricsText) defer ts.Close() - sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) + sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) require.NoError(t, err) defer sp.stop() @@ -5018,9 +4857,9 @@ disk_usage_bytes 456 } ctx := t.Context() - q, err := simpleStorage.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) + q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) require.NoError(t, err) - defer q.Close() + t.Cleanup(func() { _ = q.Close() }) series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*_total$")) for series.Next() { @@ -5036,26 +4875,25 @@ disk_usage_bytes 456 } func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *testing.T) { - appender := &collectResultAppender{} - var ( - signal = make(chan struct{}, 1) - scraper = &testScraper{} - app = func(context.Context) storage.Appender { return appender } - ) + signal := make(chan struct{}, 1) + + ctx, cancel := context.WithCancel(t.Context()) + appTest := teststorage.NewAppendable() + sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) { + sl.ctx = ctx + sl.appendable = appTest // Since we're writing samples directly below we need to provide a protocol fallback. + sl.fallbackScrapeProtocol = "text/plain" + sl.trackTimestampsStaleness = true + }) - ctx, cancel := context.WithCancel(context.Background()) - // Since we're writing samples directly below we need to provide a protocol fallback. - sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain") - sl.trackTimestampsStaleness = true // Succeed once, several failures, then stop. 
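+ // With trackTimestampsStaleness enabled above, even explicitly timestamped samples get stale markers once the target starts failing, which is what this test asserts.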
numScrapes := 0 - scraper.scrapeFunc = func(_ context.Context, w io.Writer) error { numScrapes++ switch numScrapes { case 1: - fmt.Fprintf(w, "metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond)) + _, _ = fmt.Fprintf(w, "metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond)) return nil case 5: cancel() @@ -5073,17 +4911,19 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * case <-time.After(5 * time.Second): t.Fatalf("Scrape wasn't stopped.") } + + got := appTest.ResultSamples() // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. - require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender) - require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") - require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), - "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) + require.Len(t, got, 27, "Appended samples not as expected:\n%s", appTest) + require.Equal(t, 42.0, got[0].V, "Appended first sample not as expected") + require.True(t, value.IsStaleNaN(got[6].V), + "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(got[6].V)) } func TestScrapeLoopCompression(t *testing.T) { - simpleStorage := teststorage.New(t) - defer simpleStorage.Close() + s := teststorage.New(t) + t.Cleanup(func() { _ = s.Close() }) metricsText := makeTestGauges(10) @@ -5105,12 +4945,12 @@ func TestScrapeLoopCompression(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { require.Equal(t, tc.acceptEncoding, r.Header.Get("Accept-Encoding"), "invalid value of the Accept-Encoding header") - fmt.Fprint(w, string(metricsText)) + _, _ = fmt.Fprint(w, string(metricsText)) close(scraped) })) defer ts.Close() - config := &config.ScrapeConfig{ + cfg := &config.ScrapeConfig{ JobName: "test", SampleLimit: 100, Scheme: "http", @@ -5121,7 +4961,7 @@ func TestScrapeLoopCompression(t *testing.T) { MetricNameEscapingScheme: model.AllowUTF8, } - sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) + sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) require.NoError(t, err) defer sp.stop() @@ -5231,11 +5071,11 @@ func BenchmarkTargetScraperGzip(b *testing.B) { gw := gzip.NewWriter(&buf) for j := 0; j < scenarios[i].metricsCount; j++ { name = fmt.Sprintf("go_memstats_alloc_bytes_total_%d", j) - fmt.Fprintf(gw, "# HELP %s Total number of bytes allocated, even if freed.\n", name) - fmt.Fprintf(gw, "# TYPE %s counter\n", name) - fmt.Fprintf(gw, "%s %d\n", name, i*j) + _, _ = fmt.Fprintf(gw, "# HELP %s Total number of bytes allocated, even if freed.\n", name) + _, _ = fmt.Fprintf(gw, "# TYPE %s counter\n", name) + _, _ = fmt.Fprintf(gw, "%s %d\n", name, i*j) } - gw.Close() + require.NoError(b, gw.Close()) scenarios[i].body = buf.Bytes() } @@ -5244,7 +5084,7 @@ func BenchmarkTargetScraperGzip(b *testing.B) { w.Header().Set("Content-Encoding", "gzip") for _, scenario := range scenarios { if strconv.Itoa(scenario.metricsCount) == r.URL.Query()["count"][0] { - w.Write(scenario.body) + _, _ = w.Write(scenario.body) return } } @@ -5293,31 +5133,31 @@ func BenchmarkTargetScraperGzip(b *testing.B) { // When a scrape contains multiple instances for the same time series we should increment // 
prometheus_target_scrapes_sample_duplicate_timestamp_total metric. func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) { - ctx, sl := simpleTestScrapeLoop(t) + sl, _ := newTestScrapeLoop(t) - slApp := sl.appender(ctx) - total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\ntest_metric 2\ntest_metric 3\n"), "text/plain", time.Time{}) + app := sl.appender() + total, added, seriesAdded, err := app.append([]byte("test_metric 1\ntest_metric 2\ntest_metric 3\n"), "text/plain", time.Time{}) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) require.Equal(t, 3, total) require.Equal(t, 3, added) require.Equal(t, 1, seriesAdded) require.Equal(t, 2.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate)) - slApp = sl.appender(ctx) - total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "text/plain", time.Time{}) + app = sl.appender() + total, added, seriesAdded, err = app.append([]byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "text/plain", time.Time{}) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) require.Equal(t, 3, total) require.Equal(t, 3, added) require.Equal(t, 0, seriesAdded) require.Equal(t, 4.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate)) // When different timestamps are supplied, multiple samples are accepted. - slApp = sl.appender(ctx) - total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1 1001\ntest_metric 1 1002\ntest_metric 1 1003\n"), "text/plain", time.Time{}) + app = sl.appender() + total, added, seriesAdded, err = app.append([]byte("test_metric 1 1001\ntest_metric 1 1002\ntest_metric 1 1003\n"), "text/plain", time.Time{}) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) require.Equal(t, 3, total) require.Equal(t, 3, added) require.Equal(t, 0, seriesAdded) @@ -5365,7 +5205,7 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec }, ) registry := prometheus.NewRegistry() - registry.Register(nativeHistogram) + require.NoError(t, registry.Register(nativeHistogram)) nativeHistogram.Observe(1.0) nativeHistogram.Observe(1.0) nativeHistogram.Observe(1.0) @@ -5379,10 +5219,10 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec histogramMetricFamily := gathered[0] buffer := protoMarshalDelimited(t, histogramMetricFamily) - // Create a HTTP server to serve /metrics via ProtoBuf + // Create an HTTP server to serve /metrics via ProtoBuf metricsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`) - w.Write(buffer) + _, _ = w.Write(buffer) })) defer metricsServer.Close() @@ -5401,18 +5241,17 @@ scrape_configs: `, minBucketFactor, strings.ReplaceAll(metricsServer.URL, "http://", "")) s := teststorage.New(t) - defer s.Close() + t.Cleanup(func() { _ = s.Close() }) reg := prometheus.NewRegistry() mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond)}, nil, nil, s, reg) require.NoError(t, err) cfg, err := config.Load(configStr, promslog.NewNopLogger()) require.NoError(t, err) - mng.ApplyConfig(cfg) + require.NoError(t, mng.ApplyConfig(cfg)) tsets := make(chan map[string][]*targetgroup.Group) go func() { - err = mng.Run(tsets) - require.NoError(t, err) + 
require.NoError(t, mng.Run(tsets)) }() defer mng.Stop() @@ -5441,7 +5280,7 @@ scrape_configs: q, err := s.Querier(0, math.MaxInt64) require.NoError(t, err) seriesS := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", "testing_example_native_histogram")) - histogramSamples := []*histogram.Histogram{} + var histogramSamples []*histogram.Histogram for seriesS.Next() { series := seriesS.At() it := series.Iterator(nil) @@ -5487,7 +5326,7 @@ func TestTargetScrapeConfigWithLabels(t *testing.T) { require.Equal(t, expectedPath, r.URL.Path) w.Header().Set("Content-Type", `text/plain; version=0.0.4`) - w.Write([]byte("metric_a 1\nmetric_b 2\n")) + _, _ = w.Write([]byte("metric_a 1\nmetric_b 2\n")) }), ) t.Cleanup(server.Close) @@ -5507,7 +5346,7 @@ func TestTargetScrapeConfigWithLabels(t *testing.T) { } } - sp, err := newScrapePool(cfg, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) + sp, err := newScrapePool(cfg, teststorage.NewAppendable(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) require.NoError(t, err) t.Cleanup(sp.stop) @@ -5635,7 +5474,7 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha scrapedTwice = make(chan bool) return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - fmt.Fprint(w, scrapeText) + _, _ = fmt.Fprint(w, scrapeText) scrapes++ if scrapes == 2 { close(scrapedTwice) @@ -5647,7 +5486,7 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha func TestScrapePoolScrapeAfterReload(t *testing.T) { h := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, _ *http.Request) { - w.Write([]byte{0x42, 0x42}) + _, _ = w.Write([]byte{0x42, 0x42}) }, )) t.Cleanup(h.Close) @@ -5670,7 +5509,7 @@ func TestScrapePoolScrapeAfterReload(t *testing.T) { }, } - p, err := newScrapePool(cfg, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) + p, err := newScrapePool(cfg, teststorage.NewAppendable(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) require.NoError(t, err) t.Cleanup(p.stop) @@ -5697,103 +5536,105 @@ func TestScrapeAppendWithParseError(t *testing.T) { # EOF` ) - sl := newBasicScrapeLoop(t, context.Background(), nil, nil, 0) - sl.cache = newScrapeCache(sl.metrics) - + appTest := teststorage.NewAppendable() + sl, _ := newTestScrapeLoop(t, withAppendable(appTest)) now := time.Now() - capp := &collectResultAppender{next: nopAppender{}} - _, _, _, err := sl.append(capp, []byte(scrape1), "application/openmetrics-text", now) + + app := sl.appender() + _, _, _, err := app.append([]byte(scrape1), "application/openmetrics-text", now) require.Error(t, err) - _, _, _, err = sl.append(capp, nil, "application/openmetrics-text", now) + require.NoError(t, app.Rollback()) + + app = sl.appender() + _, _, _, err = app.append(nil, "application/openmetrics-text", now) require.NoError(t, err) - require.Empty(t, capp.resultFloats) + require.NoError(t, app.Commit()) + require.Empty(t, appTest.ResultSamples()) - capp = &collectResultAppender{next: nopAppender{}} - _, _, _, err = sl.append(capp, []byte(scrape2), "application/openmetrics-text", now.Add(15*time.Second)) + app = sl.appender() + _, _, _, err = app.append([]byte(scrape2), "application/openmetrics-text", now.Add(15*time.Second)) require.NoError(t, err) - require.NoError(t, capp.Commit()) + require.NoError(t, app.Commit()) - want := []floatSample{ + want := []sample{ { - metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), - t: 
timestamp.FromTime(now.Add(15 * time.Second)), - f: 11, + L: labels.FromStrings(model.MetricNameLabel, "metric_a"), + T: timestamp.FromTime(now.Add(15 * time.Second)), + V: 11, }, } - requireEqual(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", capp) + requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest) } -// This test covers a case where there's a target with sample_limit set and the some of exporter samples +// This test covers a case where there's a target with sample_limit set and the set of exported samples // changes between scrapes. func TestScrapeLoopAppendSampleLimitWithDisappearingSeries(t *testing.T) { const sampleLimit = 4 - resApp := &collectResultAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { - return resApp - }, 0) - sl.sampleLimit = sampleLimit + + appTest := teststorage.NewAppendable() + sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) { + sl.appendable = appTest + sl.sampleLimit = sampleLimit + }) now := time.Now() - slApp := sl.appender(context.Background()) - samplesScraped, samplesAfterRelabel, createdSeries, err := sl.append( - slApp, + app := sl.appender() + samplesScraped, samplesAfterRelabel, createdSeries, err := app.append( // Start with 3 samples, all accepted. []byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "text/plain", now, ) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) require.Equal(t, 3, samplesScraped) // All on scrape. require.Equal(t, 3, samplesAfterRelabel) // This is series after relabeling. require.Equal(t, 3, createdSeries) // Newly added to TSDB. - want := []floatSample{ + want := []sample{ { - metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_a"), + T: timestamp.FromTime(now), + V: 1, }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_b"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_b"), + T: timestamp.FromTime(now), + V: 1, }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_c"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_c"), + T: timestamp.FromTime(now), + V: 1, }, } - requireEqual(t, want, resApp.resultFloats, "Appended samples not as expected:\n%s", slApp) + requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app) now = now.Add(time.Minute) - slApp = sl.appender(context.Background()) - samplesScraped, samplesAfterRelabel, createdSeries, err = sl.append( - slApp, + app = sl.appender() + samplesScraped, samplesAfterRelabel, createdSeries, err = app.append( // Start exporting 3 more samples, so we're over the limit now. []byte("metric_a 1\nmetric_b 1\nmetric_c 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\n"), "text/plain", now, ) require.ErrorIs(t, err, errSampleLimit) - require.NoError(t, slApp.Rollback()) + require.NoError(t, app.Rollback()) require.Equal(t, 6, samplesScraped) require.Equal(t, 6, samplesAfterRelabel) require.Equal(t, 1, createdSeries) // We've added one series before hitting the limit.
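+ // The over-limit scrape was rolled back above, so the stored samples still match only the first scrape.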
- requireEqual(t, want, resApp.resultFloats, "Appended samples not as expected:\n%s", slApp) + requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app) sl.cache.iterDone(false) now = now.Add(time.Minute) - slApp = sl.appender(context.Background()) - samplesScraped, samplesAfterRelabel, createdSeries, err = sl.append( - slApp, + app = sl.appender() + samplesScraped, samplesAfterRelabel, createdSeries, err = app.append( // Remove all samples except first 2. []byte("metric_a 1\nmetric_b 1\n"), "text/plain", now, ) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) require.Equal(t, 2, samplesScraped) require.Equal(t, 2, samplesAfterRelabel) require.Equal(t, 0, createdSeries) @@ -5802,152 +5643,147 @@ func TestScrapeLoopAppendSampleLimitWithDisappearingSeries(t *testing.T) { // - Append with stale markers for metric_c - this series was added during first scrape but disappeared during last scrape. // - Append with stale marker for metric_d - this series was added during second scrape before we hit the sample_limit. // We should NOT see: - // - Appends with stale markers for metric_e & metric_f - both over the limit during second scrape and so they never made it into TSDB. - want = append(want, []floatSample{ + // - Appends with stale markers for metric_e & metric_f - both over the limit during second scrape, and so they never made it into TSDB. + want = append(want, []sample{ { - metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_a"), + T: timestamp.FromTime(now), + V: 1, }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_b"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_b"), + T: timestamp.FromTime(now), + V: 1, }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_c"), - t: timestamp.FromTime(now), - f: math.Float64frombits(value.StaleNaN), + L: labels.FromStrings(model.MetricNameLabel, "metric_c"), + T: timestamp.FromTime(now), + V: math.Float64frombits(value.StaleNaN), }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_d"), - t: timestamp.FromTime(now), - f: math.Float64frombits(value.StaleNaN), + L: labels.FromStrings(model.MetricNameLabel, "metric_d"), + T: timestamp.FromTime(now), + V: math.Float64frombits(value.StaleNaN), }, }...) - requireEqual(t, want, resApp.resultFloats, "Appended samples not as expected:\n%s", slApp) + requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app) } // This test covers a case where there's a target with sample_limit set and each scrape sees a completely // different set of samples. func TestScrapeLoopAppendSampleLimitReplaceAllSamples(t *testing.T) { const sampleLimit = 4 - resApp := &collectResultAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { - return resApp - }, 0) - sl.sampleLimit = sampleLimit + + appTest := teststorage.NewAppendable() + sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) { + sl.appendable = appTest + sl.sampleLimit = sampleLimit + }) now := time.Now() - slApp := sl.appender(context.Background()) - samplesScraped, samplesAfterRelabel, createdSeries, err := sl.append( - slApp, + app := sl.appender() + samplesScraped, samplesAfterRelabel, createdSeries, err := app.append( // Start with 4 samples, all accepted. 
[]byte("metric_a 1\nmetric_b 1\nmetric_c 1\nmetric_d 1\n"), "text/plain", now, ) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) require.Equal(t, 4, samplesScraped) // All on scrape. require.Equal(t, 4, samplesAfterRelabel) // This is series after relabeling. require.Equal(t, 4, createdSeries) // Newly added to TSDB. - want := []floatSample{ + want := []sample{ { - metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_a"), + T: timestamp.FromTime(now), + V: 1, }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_b"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_b"), + T: timestamp.FromTime(now), + V: 1, }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_c"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_c"), + T: timestamp.FromTime(now), + V: 1, }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_d"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_d"), + T: timestamp.FromTime(now), + V: 1, }, } - requireEqual(t, want, resApp.resultFloats, "Appended samples not as expected:\n%s", slApp) + requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app) now = now.Add(time.Minute) - slApp = sl.appender(context.Background()) - samplesScraped, samplesAfterRelabel, createdSeries, err = sl.append( - slApp, + app = sl.appender() + samplesScraped, samplesAfterRelabel, createdSeries, err = app.append( // Replace all samples with new time series. []byte("metric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h 1\n"), "text/plain", now, ) require.NoError(t, err) - require.NoError(t, slApp.Commit()) + require.NoError(t, app.Commit()) require.Equal(t, 4, samplesScraped) require.Equal(t, 4, samplesAfterRelabel) require.Equal(t, 4, createdSeries) // We replaced all samples from first scrape with new set of samples. // We expect to see: // - 4 appends for new samples. // - 4 appends with staleness markers for old samples.
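// Note: staleness markers are ordinary float samples carrying the special StaleNaN bit pattern, which is why the expectations below are built with math.Float64frombits and compared bit-for-bit. A minimal sketch using the model/value helpers already imported by this file:
//
//	marker := math.Float64frombits(value.StaleNaN)
//	value.IsStaleNaN(marker) // true; a plain math.NaN() would yield false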
- want = append(want, []floatSample{ + want = append(want, []sample{ { - metric: labels.FromStrings(model.MetricNameLabel, "metric_e"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_e"), + T: timestamp.FromTime(now), + V: 1, }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_f"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_f"), + T: timestamp.FromTime(now), + V: 1, }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_g"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_g"), + T: timestamp.FromTime(now), + V: 1, }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_h"), - t: timestamp.FromTime(now), - f: 1, + L: labels.FromStrings(model.MetricNameLabel, "metric_h"), + T: timestamp.FromTime(now), + V: 1, }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_a"), - t: timestamp.FromTime(now), - f: math.Float64frombits(value.StaleNaN), + L: labels.FromStrings(model.MetricNameLabel, "metric_a"), + T: timestamp.FromTime(now), + V: math.Float64frombits(value.StaleNaN), }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_b"), - t: timestamp.FromTime(now), - f: math.Float64frombits(value.StaleNaN), + L: labels.FromStrings(model.MetricNameLabel, "metric_b"), + T: timestamp.FromTime(now), + V: math.Float64frombits(value.StaleNaN), }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_c"), - t: timestamp.FromTime(now), - f: math.Float64frombits(value.StaleNaN), + L: labels.FromStrings(model.MetricNameLabel, "metric_c"), + T: timestamp.FromTime(now), + V: math.Float64frombits(value.StaleNaN), }, { - metric: labels.FromStrings(model.MetricNameLabel, "metric_d"), - t: timestamp.FromTime(now), - f: math.Float64frombits(value.StaleNaN), + L: labels.FromStrings(model.MetricNameLabel, "metric_d"), + T: timestamp.FromTime(now), + V: math.Float64frombits(value.StaleNaN), }, }...) - requireEqual(t, want, resApp.resultFloats, "Appended samples not as expected:\n%s", slApp) + requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app) } func TestScrapeLoopDisableStalenessMarkerInjection(t *testing.T) { - var ( - loopDone = atomic.NewBool(false) - appender = &collectResultAppender{} - scraper = &testScraper{} - app = func(_ context.Context) storage.Appender { return appender } - ) + loopDone := atomic.NewBool(false) - sl := newBasicScrapeLoop(t, context.Background(), scraper, app, 10*time.Millisecond) + appTest := teststorage.NewAppendable() + sl, scraper := newTestScrapeLoop(t, withAppendable(appTest)) scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { if _, err := w.Write([]byte("metric_a 42\n")); err != nil { return err @@ -5963,9 +5799,7 @@ func TestScrapeLoopDisableStalenessMarkerInjection(t *testing.T) { // Wait for some samples to be appended. require.Eventually(t, func() bool { - appender.mtx.Lock() - defer appender.mtx.Unlock() - return len(appender.resultFloats) > 2 + return len(appTest.ResultSamples()) > 2 }, 5*time.Second, 100*time.Millisecond, "Scrape loop didn't append any samples.") // Disable end of run staleness markers and stop the loop. @@ -5976,9 +5810,125 @@ func TestScrapeLoopDisableStalenessMarkerInjection(t *testing.T) { }, 5*time.Second, 100*time.Millisecond, "Scrape loop didn't stop.") // No stale markers should be appended, since they were disabled. 
- for _, s := range appender.resultFloats { - if value.IsStaleNaN(s.f) { - t.Fatalf("Got stale NaN samples while end of run staleness is disabled: %x", math.Float64bits(s.f)) + for _, s := range appTest.ResultSamples() { + if value.IsStaleNaN(s.V) { + t.Fatalf("Got stale NaN samples while end of run staleness is disabled: %x", math.Float64bits(s.V)) } } } + +// Recommended CLI invocation: +/* + export bench=restartLoops && go test ./scrape/... \ + -run '^$' -bench '^BenchmarkScrapePoolRestartLoops' \ + -benchtime 5s -count 6 -cpu 2 -timeout 999m \ + | tee ${bench}.txt +*/ +func BenchmarkScrapePoolRestartLoops(b *testing.B) { + sp, err := newScrapePool( + &config.ScrapeConfig{ + MetricNameValidationScheme: model.UTF8Validation, + ScrapeInterval: model.Duration(1 * time.Hour), + ScrapeTimeout: model.Duration(1 * time.Hour), + }, + nil, + 0, + nil, + nil, + &Options{}, + newTestScrapeMetrics(b), + ) + require.NoError(b, err) + b.Cleanup(sp.stop) + + for i := range 1000 { + sp.activeTargets[uint64(i)] = &Target{scrapeConfig: &config.ScrapeConfig{}} + sp.loops[uint64(i)] = noopLoop() // First restart will supplement those with proper scrapeLoops. + } + sp.restartLoops(true) + + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + sp.restartLoops(true) + } +} + +// TestNewScrapeLoopHonorLabelsWiring verifies that newScrapeLoop correctly wires +// HonorLabels (not HonorTimestamps) to the sampleMutator. +func TestNewScrapeLoopHonorLabelsWiring(t *testing.T) { + // Scraped metric has label "lbl" with value "scraped". + // Discovery target has label "lbl" with value "discovery". + // With honor_labels=true, the scraped value should win. + // With honor_labels=false, the discovery value should win and scraped moves to exported_lbl. + testCases := []struct { + name string + honorLabels bool + expectedLbl string + expectedExpLbl string // exported_lbl value, empty if not expected + }{ + { + name: "honor_labels=true", + honorLabels: true, + expectedLbl: "scraped", + }, + { + name: "honor_labels=false", + honorLabels: false, + expectedLbl: "discovery", + expectedExpLbl: "scraped", + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ts, scrapedTwice := newScrapableServer(`metric{lbl="scraped"} 1`) + defer ts.Close() + + testURL, err := url.Parse(ts.URL) + require.NoError(t, err) + + s := teststorage.New(t) + defer s.Close() + + cfg := &config.ScrapeConfig{ + JobName: "test", + Scheme: "http", + HonorLabels: tc.honorLabels, + HonorTimestamps: !tc.honorLabels, // Opposite of HonorLabels to catch wiring bugs + ScrapeInterval: model.Duration(1 * time.Second), + ScrapeTimeout: model.Duration(100 * time.Millisecond), + MetricNameValidationScheme: model.UTF8Validation, + } + + sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{skipOffsetting: true}, newTestScrapeMetrics(t)) + require.NoError(t, err) + defer sp.stop() + + // Sync with a target that has a conflicting label. + sp.Sync([]*targetgroup.Group{{ + Targets: []model.LabelSet{{ + model.AddressLabel: model.LabelValue(testURL.Host), + "lbl": "discovery", + }}, + }}) + require.Len(t, sp.ActiveTargets(), 1) + + // Wait for scrape to complete. + select { + case <-time.After(5 * time.Second): + t.Fatal("scrape did not complete in time") + case <-scrapedTwice: + } + + // Query the storage to verify label values. 
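// For illustration, the series shapes the two cases are expected to produce (mirroring the comments at the top of this test):
//
//	honor_labels=true:  metric{lbl="scraped"} 1
//	honor_labels=false: metric{lbl="discovery", exported_lbl="scraped"} 1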
+ q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) + require.NoError(t, err) + defer q.Close() + + series := q.Select(t.Context(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", "metric")) + require.True(t, series.Next(), "metric series not found") + require.Equal(t, tc.expectedLbl, series.At().Labels().Get("lbl")) + require.Equal(t, tc.expectedExpLbl, series.At().Labels().Get("exported_lbl")) + }) + } +} diff --git a/scrape/target.go b/scrape/target.go index 2aabff20e2..4265f9e782 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/scrape/target_test.go b/scrape/target_test.go index 582b198c79..06227da816 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -14,7 +14,6 @@ package scrape import ( - "context" "crypto/tls" "crypto/x509" "fmt" @@ -36,7 +35,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" - "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/teststorage" ) const ( @@ -611,12 +610,12 @@ func TestBucketLimitAppender(t *testing.T) { }, } - resApp := &collectResultAppender{} + appTest := teststorage.NewAppendable() for _, c := range cases { for _, floatHisto := range []bool{true, false} { t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { - app := &bucketLimitAppender{Appender: resApp, limit: c.limit} + app := &bucketLimitAppender{Appender: appTest.Appender(t.Context()), limit: c.limit} ts := int64(10 * time.Minute / time.Millisecond) lbls := labels.FromStrings("__name__", "sparse_histogram_series") var err error @@ -697,12 +696,12 @@ func TestMaxSchemaAppender(t *testing.T) { }, } - resApp := &collectResultAppender{} + appTest := teststorage.NewAppendable() for _, c := range cases { for _, floatHisto := range []bool{true, false} { t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { - app := &maxSchemaAppender{Appender: resApp, maxSchema: c.maxSchema} + app := &maxSchemaAppender{Appender: appTest.Appender(t.Context()), maxSchema: c.maxSchema} ts := int64(10 * time.Minute / time.Millisecond) lbls := labels.FromStrings("__name__", "sparse_histogram_series") var err error @@ -723,17 +722,12 @@ func TestMaxSchemaAppender(t *testing.T) { } } -// Test sample_limit when a scrape containst Native Histograms. +// Test sample_limit when a scrape contains Native Histograms. 
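// A sketch of the expectation exercised below, assuming the limit of 2 configured in this test; histogram appends are expected to count toward sample_limit exactly like float appends:
//
//	_, _ = app.Append(0, labels.FromStrings(model.MetricNameLabel, "foo"), timestamp.FromTime(now), 1)
//	_, _ = app.Append(0, labels.FromStrings(model.MetricNameLabel, "bar"), timestamp.FromTime(now), 1)
//	_, err := app.Append(0, labels.FromStrings(model.MetricNameLabel, "baz"), timestamp.FromTime(now), 1)
//	// require.ErrorIs(t, err, errSampleLimit)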
func TestAppendWithSampleLimitAndNativeHistogram(t *testing.T) { - const sampleLimit = 2 - resApp := &collectResultAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { - return resApp - }, 0) - sl.sampleLimit = sampleLimit + appTest := teststorage.NewAppendable() now := time.Now() - app := appender(sl.appender(context.Background()), sl.sampleLimit, sl.bucketLimit, sl.maxSchema) + app := appenderWithLimits(appTest.Appender(t.Context()), 2, 0, histogram.ExponentialSchemaMax) // sample_limit is set to 2, so first two scrapes should work _, err := app.Append(0, labels.FromStrings(model.MetricNameLabel, "foo"), timestamp.FromTime(now), 1) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 75f886d546..2736e69b78 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -24,11 +24,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - name: Install Go - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version: 1.25.x - name: Install snmp_exporter/generator dependencies @@ -38,7 +38,7 @@ jobs: id: golangci-lint-version run: echo "version=$(make print-golangci-lint-version)" >> $GITHUB_OUTPUT - name: Lint - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + uses: golangci/golangci-lint-action@0a35821d5c230e903fcfe077583637dea1b27b47 # v9.0.0 with: args: --verbose version: ${{ steps.golangci-lint-version.outputs.version }} diff --git a/tsdb/fileutil/mmap_windows.go b/tsdb/fileutil/mmap_windows.go index b942264123..5704b3b96d 100644 --- a/tsdb/fileutil/mmap_windows.go +++ b/tsdb/fileutil/mmap_windows.go @@ -27,14 +27,15 @@ func mmap(f *os.File, size int) ([]byte, error) { } addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(size)) - if addr == 0 { - return nil, os.NewSyscallError("MapViewOfFile", errno) - } if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { return nil, os.NewSyscallError("CloseHandle", err) } + if addr == 0 { + return nil, os.NewSyscallError("MapViewOfFile", errno) + } + return (*[maxMapSize]byte)(unsafe.Pointer(addr))[:size], nil } diff --git a/util/httputil/compression.go b/util/httputil/compression.go index d5bedb7fa9..e67f9ffd9f 100644 --- a/util/httputil/compression.go +++ b/util/httputil/compression.go @@ -56,6 +56,7 @@ func (c *compressedResponseWriter) Close() { // Constructs a new compressedResponseWriter based on client request headers. 
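// Note on the Content-Length handling below: a handler may have set Content-Length for the uncompressed body, and that value becomes stale once the response is wrapped in a compressing writer, so it has to be dropped; the added Vary header tells caches that the body depends on the request's Accept-Encoding. A minimal sketch of the failure mode, with a hypothetical upstream handler:
//
//	body := []byte("large payload")
//	w.Header().Set("Content-Length", strconv.Itoa(len(body))) // length of the uncompressed body
//	// If the body is then gzip-compressed, clients honoring Content-Length will
//	// truncate or over-read the stream; hence h.Del("Content-Length") below.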
func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter { + writer.Header().Add("Vary", acceptEncodingHeader) raw := req.Header.Get(acceptEncodingHeader) var ( encoding string @@ -65,13 +66,17 @@ func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) encoding, raw, commaFound = strings.Cut(raw, ",") switch strings.TrimSpace(encoding) { case gzipEncoding: - writer.Header().Set(contentEncodingHeader, gzipEncoding) + h := writer.Header() + h.Del("Content-Length") // avoid stale length after compression + h.Set(contentEncodingHeader, gzipEncoding) return &compressedResponseWriter{ ResponseWriter: writer, writer: gzip.NewWriter(writer), } case deflateEncoding: - writer.Header().Set(contentEncodingHeader, deflateEncoding) + h := writer.Header() + h.Del("Content-Length") + h.Set(contentEncodingHeader, deflateEncoding) return &compressedResponseWriter{ ResponseWriter: writer, writer: zlib.NewWriter(writer), diff --git a/util/teststorage/appender.go b/util/teststorage/appender.go new file mode 100644 index 0000000000..058a09561c --- /dev/null +++ b/util/teststorage/appender.go @@ -0,0 +1,399 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package teststorage + +import ( + "context" + "errors" + "fmt" + "math" + "slices" + "strings" + "sync" + + "github.com/prometheus/common/model" + "go.uber.org/atomic" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/storage" +) + +// Sample represents a combined test sample for mocking storage.AppenderV2. +type Sample struct { + MF string + L labels.Labels + M metadata.Metadata + ST, T int64 + V float64 + H *histogram.Histogram + FH *histogram.FloatHistogram + ES []exemplar.Exemplar +} + +func (s Sample) String() string { + // Attempt to format roughly like OpenMetrics 2.0 for readability. + b := strings.Builder{} + if s.M.Help != "" { + b.WriteString("HELP ") + b.WriteString(s.M.Help) + b.WriteString("\n") + } + if s.M.Type != model.MetricTypeUnknown && s.M.Type != "" { + b.WriteString("type@") + b.WriteString(string(s.M.Type)) + b.WriteString(" ") + } + if s.M.Unit != "" { + b.WriteString("unit@") + b.WriteString(s.M.Unit) + b.WriteString(" ") + } + // Print all value types on purpose, to catch bugs for appending multiple sample types at once.
+ h := "" + if s.H != nil { + h = s.H.String() + } + fh := "" + if s.FH != nil { + fh = s.FH.String() + } + b.WriteString(fmt.Sprintf("%s %v%v%v st@%v t@%v\n", s.L.String(), s.V, h, fh, s.ST, s.T)) + return b.String() +} + +func (s Sample) Equals(other Sample) bool { + return s.MF == other.MF && + labels.Equal(s.L, other.L) && + s.M.Equals(other.M) && + s.ST == other.ST && + s.T == other.T && + math.Float64bits(s.V) == math.Float64bits(other.V) && // Compare Float64bits so NaN values which are exactly the same will compare equal. + s.H.Equals(other.H) && + s.FH.Equals(other.FH) && + slices.EqualFunc(s.ES, other.ES, exemplar.Exemplar.Equals) +} + +// Appendable is a storage.Appendable mock. +// It allows recording all samples that were added through the appender and injecting errors. +// Appendable returns an erroring Appender if more than one Appender is open at a time. +type Appendable struct { + appendErrFn func(ls labels.Labels) error // If non-nil, inject an appender error on every Append, AppendHistogram and ST zero-sample call. + appendExemplarsError error // If non-nil, inject exemplar error. + commitErr error // If non-nil, inject commit error. + + mtx sync.Mutex + openAppenders atomic.Int32 // Guard against multi-appender use. + + // Recorded results. + pendingSamples []Sample + resultSamples []Sample + rolledbackSamples []Sample + + // Optional chain (Appender will collect samples, then run next). + next storage.Appendable +} + +// NewAppendable returns a mock Appendable. +func NewAppendable() *Appendable { + return &Appendable{} +} + +// Then chains another appender, built from the provided appendable, into the Appender calls. +func (a *Appendable) Then(appendable storage.Appendable) *Appendable { + a.next = appendable + return a +} + +// WithErrs allows injecting errors into the appender. +func (a *Appendable) WithErrs(appendErrFn func(ls labels.Labels) error, appendExemplarsError, commitErr error) *Appendable { + a.appendErrFn = appendErrFn + a.appendExemplarsError = appendExemplarsError + a.commitErr = commitErr + return a +} + +// PendingSamples returns pending samples (samples appended but not yet committed). +func (a *Appendable) PendingSamples() []Sample { + a.mtx.Lock() + defer a.mtx.Unlock() + + ret := make([]Sample, len(a.pendingSamples)) + copy(ret, a.pendingSamples) + return ret +} + +// ResultSamples returns committed samples. +func (a *Appendable) ResultSamples() []Sample { + a.mtx.Lock() + defer a.mtx.Unlock() + + ret := make([]Sample, len(a.resultSamples)) + copy(ret, a.resultSamples) + return ret +} + +// RolledbackSamples returns rolled-back samples. +func (a *Appendable) RolledbackSamples() []Sample { + a.mtx.Lock() + defer a.mtx.Unlock() + + ret := make([]Sample, len(a.rolledbackSamples)) + copy(ret, a.rolledbackSamples) + return ret +} + +func (a *Appendable) ResultReset() { + a.mtx.Lock() + defer a.mtx.Unlock() + + a.pendingSamples = a.pendingSamples[:0] + a.resultSamples = a.resultSamples[:0] + a.rolledbackSamples = a.rolledbackSamples[:0] +} + +// ResultMetadata returns resultSamples with samples only containing L and M. +// This is for compatibility with tests that only focus on metadata. +// +// TODO: Rewrite tests to test metadata on resultSamples instead.
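// For example (illustrative): after committing one sample that carries metadata and one that does not, only the former is returned, reduced to its labels and metadata:
//
//	appTest.ResultMetadata()
//	// => []Sample{{L: labels.FromStrings("__name__", "m"), M: metadata.Metadata{Type: "counter"}}}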
+func (a *Appendable) ResultMetadata() []Sample { + a.mtx.Lock() + defer a.mtx.Unlock() + + var ret []Sample + for _, s := range a.resultSamples { + if s.M.IsEmpty() { + continue + } + ret = append(ret, Sample{L: s.L, M: s.M}) + } + return ret +} + +func (a *Appendable) String() string { + var sb strings.Builder + sb.WriteString("committed:\n") + for _, s := range a.resultSamples { + sb.WriteString("\n") + sb.WriteString(s.String()) + } + sb.WriteString("pending:\n") + for _, s := range a.pendingSamples { + sb.WriteString("\n") + sb.WriteString(s.String()) + } + sb.WriteString("rolledback:\n") + for _, s := range a.rolledbackSamples { + sb.WriteString("\n") + sb.WriteString(s.String()) + } + return sb.String() +} + +var errClosedAppender = errors.New("appender was already committed/rolledback") + +type appender struct { + err error + next storage.Appender + + a *Appendable +} + +func (a *appender) checkErr() error { + a.a.mtx.Lock() + defer a.a.mtx.Unlock() + + return a.err +} + +func (a *Appendable) Appender(ctx context.Context) storage.Appender { + ret := &appender{a: a} + if a.openAppenders.Inc() > 1 { + ret.err = errors.New("teststorage.Appendable.Appender() concurrent use is not supported; attempted to open a new Appender() without Commit/Rollback of the previous one. Extend the implementation if a concurrent mock is needed") + } + + if a.next != nil { + ret.next = a.next.Appender(ctx) + } + return ret +} + +func (*appender) SetOptions(*storage.AppendOptions) {} + +func (a *appender) Append(ref storage.SeriesRef, ls labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + if err := a.checkErr(); err != nil { + return 0, err + } + + if a.a.appendErrFn != nil { + if err := a.a.appendErrFn(ls); err != nil { + return 0, err + } + } + + a.a.mtx.Lock() + a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, V: v}) + a.a.mtx.Unlock() + + if a.next != nil { + return a.next.Append(ref, ls, t, v) + } + + return computeOrCheckRef(ref, ls) +} + +func computeOrCheckRef(ref storage.SeriesRef, ls labels.Labels) (storage.SeriesRef, error) { + h := ls.Hash() + if ref == 0 { + // Use the labels hash as a stand-in for a unique series reference, to avoid having to track all series. + return storage.SeriesRef(h), nil + } + + if storage.SeriesRef(h) != ref { + // Check for a buggy ref while we're at it. + return 0, errors.New("teststorage.appender: found input ref not matching labels; potential bug in Appendable user") + } + return ref, nil +} + +func (a *appender) AppendHistogram(ref storage.SeriesRef, ls labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if err := a.checkErr(); err != nil { + return 0, err + } + if a.a.appendErrFn != nil { + if err := a.a.appendErrFn(ls); err != nil { + return 0, err + } + } + + a.a.mtx.Lock() + a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, H: h, FH: fh}) + a.a.mtx.Unlock() + + if a.next != nil { + return a.next.AppendHistogram(ref, ls, t, h, fh) + } + + return computeOrCheckRef(ref, ls) +} + +func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + if err := a.checkErr(); err != nil { + return 0, err + } + if a.a.appendExemplarsError != nil { + return 0, a.a.appendExemplarsError + } + + a.a.mtx.Lock() + // NOTE(bwplotka): Eventually an exemplar has to be attached to a series, and soon + // the AppenderV2 will guarantee that for TSDB. Assume this from the mock perspective + // with the naive attaching.
See: https://github.com/prometheus/prometheus/issues/17632 + i := len(a.a.pendingSamples) - 1 + for ; i >= 0; i-- { // Attach exemplars to the last matching sample. + if ref == storage.SeriesRef(a.a.pendingSamples[i].L.Hash()) { + a.a.pendingSamples[i].ES = append(a.a.pendingSamples[i].ES, e) + break + } + } + a.a.mtx.Unlock() + if i < 0 { + return 0, fmt.Errorf("teststorage.appender: exemplar append without series; ref %v; l %v; exemplar: %v", ref, l, e) + } + + if a.next != nil { + return a.next.AppendExemplar(ref, l, e) + } + return computeOrCheckRef(ref, l) +} + +func (a *appender) AppendSTZeroSample(ref storage.SeriesRef, l labels.Labels, _, st int64) (storage.SeriesRef, error) { + return a.Append(ref, l, st, 0.0) // This will change soon with AppenderV2, but we already report ST as 0 samples. +} + +func (a *appender) AppendHistogramSTZeroSample(ref storage.SeriesRef, l labels.Labels, _, st int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { + if h != nil { + return a.AppendHistogram(ref, l, st, &histogram.Histogram{}, nil) + } + return a.AppendHistogram(ref, l, st, nil, &histogram.FloatHistogram{}) // This will change soon with AppenderV2, but we already report ST as 0 histograms. +} + +func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { + if err := a.checkErr(); err != nil { + return 0, err + } + + a.a.mtx.Lock() + // NOTE(bwplotka): Eventually metadata has to be attached to a series and soon + // the AppenderV2 will guarantee that for TSDB. Assume this from the mock perspective + // with the naive attaching. See: https://github.com/prometheus/prometheus/issues/17632 + i := len(a.a.pendingSamples) - 1 + for ; i >= 0; i-- { // Attach metadata to the last matching sample. + if ref == storage.SeriesRef(a.a.pendingSamples[i].L.Hash()) { + a.a.pendingSamples[i].M = m + break + } + } + a.a.mtx.Unlock() + if i < 0 { + return 0, fmt.Errorf("teststorage.appender: metadata update without series; ref %v; l %v; m: %v", ref, l, m) + } + + if a.next != nil { + return a.next.UpdateMetadata(ref, l, m) + } + return computeOrCheckRef(ref, l) +} + +func (a *appender) Commit() error { + if err := a.checkErr(); err != nil { + return err + } + defer a.a.openAppenders.Dec() + + if a.a.commitErr != nil { + return a.a.commitErr + } + + a.a.mtx.Lock() + a.a.resultSamples = append(a.a.resultSamples, a.a.pendingSamples...) + a.a.pendingSamples = a.a.pendingSamples[:0] + a.err = errClosedAppender + a.a.mtx.Unlock() + + if a.next != nil { + return a.next.Commit() + } + return nil +} + +func (a *appender) Rollback() error { + if err := a.checkErr(); err != nil { + return err + } + defer a.a.openAppenders.Dec() + + a.a.mtx.Lock() + a.a.rolledbackSamples = append(a.a.rolledbackSamples, a.a.pendingSamples...) + a.a.pendingSamples = a.a.pendingSamples[:0] + a.err = errClosedAppender + a.a.mtx.Unlock() + + if a.next != nil { + return a.next.Rollback() + } + return nil +} diff --git a/util/teststorage/appender_test.go b/util/teststorage/appender_test.go new file mode 100644 index 0000000000..8c2a825c3a --- /dev/null +++ b/util/teststorage/appender_test.go @@ -0,0 +1,131 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package teststorage + +import ( + "errors" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/util/testutil" +) + +// TestSample_RequireEqual ensures standard testutil.RequireEqual is enough for comparisons. +// This is thanks to the fact that metadata now has an Equals method. +func TestSample_RequireEqual(t *testing.T) { + a := []Sample{ + {}, + {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}}, + {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123}, + {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}}, + } + testutil.RequireEqual(t, a, a) + + b1 := []Sample{ + {}, + {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}}, + {L: labels.FromStrings("__name__", "test_metric2_diff", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123}, // test_metric2_diff is different. + {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}}, + } + requireNotEqual(t, a, b1) + + b2 := []Sample{ + {}, + {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}}, + {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123}, + {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo2")}}}, // exemplar is different. + } + requireNotEqual(t, a, b2) + + b3 := []Sample{ + {}, + {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}}, + {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123, T: 123}, // Timestamp is different. + {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}}, + } + requireNotEqual(t, a, b3) + + b4 := []Sample{ + {}, + {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}}, + {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 456.456}, // Value is different. + {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}}, + } + requireNotEqual(t, a, b4) + + b5 := []Sample{ + {}, + {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter2", Unit: "metric", Help: "some help text"}}, // Different type.
+ {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123}, + {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}}, + } + requireNotEqual(t, a, b5) +} + +// TODO(bwplotka): While this mimics testutil.RequireEqual, just making it negative, it does not literally test +// testutil.RequireEqual. Either build a test suite that mocks `testing.TB` or get rid of testutil.RequireEqual somehow. +func requireNotEqual(t testing.TB, a, b any) { + t.Helper() + if !cmp.Equal(a, b, cmp.Comparer(labels.Equal)) { + return + } + require.Fail(t, fmt.Sprintf("Equal, but expected not: \n"+ + "a: %s\n"+ + "b: %s", a, b)) +} + +func TestConcurrentAppender_ReturnsErrAppender(t *testing.T) { + a := NewAppendable() + + // Non-concurrent multiple use is fine. + app := a.Appender(t.Context()) + require.Equal(t, int32(1), a.openAppenders.Load()) + require.NoError(t, app.Commit()) + // Repeated commit fails. + require.Error(t, app.Commit()) + + app = a.Appender(t.Context()) + require.NoError(t, app.Rollback()) + // Commit after rollback fails. + require.Error(t, app.Commit()) + + a.WithErrs( + nil, + nil, + errors.New("commit err"), + ) + app = a.Appender(t.Context()) + require.Error(t, app.Commit()) + + a.WithErrs(nil, nil, nil) + app = a.Appender(t.Context()) + require.NoError(t, app.Commit()) + require.Equal(t, int32(0), a.openAppenders.Load()) + + // Concurrent use should return an appender that errors. + _ = a.Appender(t.Context()) + app = a.Appender(t.Context()) + _, err := app.Append(0, labels.EmptyLabels(), 0, 0) + require.Error(t, err) + _, err = app.AppendHistogram(0, labels.EmptyLabels(), 0, nil, nil) + require.Error(t, err) + require.Error(t, app.Commit()) + require.Error(t, app.Rollback()) +} diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 883ee7aaee..5df415da49 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -24,7 +24,7 @@ }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.308.0", + "version": "0.308.1", "dependencies": { "@codemirror/autocomplete": "^6.19.1", "@codemirror/language": "^6.11.3", @@ -42,7 +42,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.308.0", + "@prometheus-io/codemirror-promql": "0.308.1", "@reduxjs/toolkit": "^2.10.1", "@tabler/icons-react": "^3.35.0", "@tanstack/react-query": "^5.90.7", @@ -88,10 +88,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.308.0", + "version": "0.308.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.308.0", + "@prometheus-io/lezer-promql": "0.308.1", "lru-cache": "^11.2.2" }, "devDependencies": { @@ -121,7 +121,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.308.0", + "version": "0.308.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.8.0", @@ -8693,10 +8693,11 @@ } }, "node_modules/ts-jest": { - "version": "29.4.5", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.5.tgz", - "integrity": "sha512-HO3GyiWn2qvTQA4kTgjDcXiMwYQt68a1Y8+JuLRVpdIzm+UOLSHgl/XqR4c6nzJkq5rOkjc02O2I7P7l/Yof0Q==", + "version": "29.4.6", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.6.tgz", + "integrity": "sha512-fSpWtOO/1AjSNQguk43hb/JCo16oJDnMJf3CdEGNkqsEX3t0KX96xvyX1D7PfLCpVoKu4MfVrqUkFyblYoY4lA==", "dev": true, + "license": "MIT",
"dependencies": { "bs-logger": "^0.2.6", "fast-json-stable-stringify": "^2.1.0",