From 64431830b8daa58849ffe97a3334512bab43d215 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 5 Jun 2025 21:52:50 +0200 Subject: [PATCH] Add more avg_over_time test cases with extreme values These tests were initially created by @crush-on-anechka. I modified them slightly. Signed-off-by: beorn7 --- promql/promqltest/testdata/functions.test | 73 +++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 83c49320ed..beb216b93c 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -1013,6 +1013,79 @@ eval instant at 1m sum_over_time(metric[2m]) eval instant at 1m avg_over_time(metric[2m]) {} 0.5 +# More tests for extreme values. +clear +# All positive values with varying magnitudes. +load 5s + metric1 1e10 1e-6 1e-6 1e-6 1e-6 1e-6 + metric2 5.30921651659898 0.961118537914768 1.62091361305318 0.865089463758091 0.323055185914577 0.951811357687154 + metric3 1.78264e50 0.76342771 1.9592258 7.69805412 458.90154 + metric4 0.76342771 1.9592258 7.69805412 1.78264e50 458.90154 + metric5 1.78264E+50 0.76342771 1.9592258 2.258E+220 7.69805412 458.90154 + +eval instant at 55s avg_over_time(metric1[1m]) + {} 1.6666666666666675e+09 + +eval instant at 55s avg_over_time(metric2[1m]) + {} 1.67186744582113 + +eval instant at 55s avg_over_time(metric3[1m]) + {} 3.56528E+49 + +eval instant at 55s avg_over_time(metric4[1m]) + {} 3.56528E+49 + +eval instant at 55s avg_over_time(metric5[1m]) + {} 3.76333333333333E+219 + +# Contains negative values; result is dominated by a very large value. 
+load 5s + metric6 -1.78264E+50 0.76342771 1.9592258 2.258E+220 7.69805412 -458.90154 + metric7 -1.78264E+50 0.76342771 1.9592258 -2.258E+220 7.69805412 -458.90154 + metric8 -1.78264E+215 0.76342771 1.9592258 2.258E+220 7.69805412 -458.90154 + metric9 -1.78264E+215 0.76342771 1.9592258 2.258E+220 7.69805412 1.78264E+215 -458.90154 + metric10 -1.78264E+219 0.76342771 1.9592258 2.3757689E+217 -2.3757689E+217 2.258E+220 7.69805412 1.78264E+219 -458.90154 + +eval instant at 55s avg_over_time(metric6[1m]) + {} 3.76333333333333E+219 + +eval instant at 55s avg_over_time(metric7[1m]) + {} -3.76333333333333E+219 + +eval instant at 55s avg_over_time(metric8[1m]) + {} 3.76330362266667E+219 + +eval instant at 55s avg_over_time(metric9[1m]) + {} 3.225714285714286e+219 + +# Interestingly, before PR #16569, this test failed with a result of +# 3.225714285714286e+219, so the incremental calculation combined with +# Kahan summation (in the improved way as introduced by PR #16569) +# seems to be more accurate than the simple mean calculation with +# Kahan summation. +eval instant at 55s avg_over_time(metric10[1m]) + {} 2.5088888888888888e+219 + +# Large values of different magnitude, combined with small values. The +# large values, however, all cancel each other exactly, so that the actual +# average here is determined by only the small values. Therefore, the correct +# outcome is -44.848083237000004. +load 5s + metric11 -2.258E+220 -1.78264E+219 0.76342771 1.9592258 2.3757689E+217 -2.3757689E+217 2.258E+220 7.69805412 1.78264E+219 -458.90154 + +# Thus, the result here is very far off! With the different orders of +# magnitudes involved, even Kahan summation cannot cope. +# Interestingly, with the simple mean calculation (still including +# Kahan summation) prior to PR #16569, the result is 0. That's +# arguably more accurate, but it still shows that Kahan summation +# doesn't work well in this situation. 
To solve this properly, we +# would need to do something like sorting the values (which is hard given +# how the PromQL engine works). The question is how practically +# relevant this scenario is. +eval instant at 55s avg_over_time(metric11[1m]) + {} -1.881783551706252e+203 +# {} -44.848083237000004 <- This is the correct value. + # Test per-series aggregation on dense samples. clear load 1ms