mirror of https://github.com/grafana/grafana
commit
41e01184aa
@ -0,0 +1,15 @@ |
||||
+++ |
||||
title = "Internal metrics" |
||||
description = "Internal metrics exposed by Grafana" |
||||
keywords = ["grafana", "metrics", "internal metrics"] |
||||
type = "docs" |
||||
[menu.docs] |
||||
parent = "admin" |
||||
weight = 8 |
||||
+++ |
||||
|
||||
# Internal metrics |
||||
|
||||
Grafana collects some metrics about itself internally. Currently, Grafana supports pushing metrics to Graphite and exposing them to be scraped by Prometheus. |
||||
|
||||
To enable internal metrics, you have to enable them under the [metrics] section in your [grafana.ini](http://docs.grafana.org/installation/configuration/#enabled-6) config file. If you want to push metrics to Graphite, you also have to configure the [metrics.graphite](http://docs.grafana.org/installation/configuration/#metrics-graphite) section. |
||||
@ -0,0 +1,115 @@ |
||||
package api |
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
macaron "gopkg.in/macaron.v1" |
||||
) |
||||
|
||||
// Router is the minimal subset of macaron's router that
// RouteRegister.Register needs in order to install accumulated routes.
// Tests substitute a fake implementation.
type Router interface {
	Handle(method, pattern string, handlers []macaron.Handler) *macaron.Route
}
||||
|
||||
// RouteRegister collects route declarations (optionally nested in
// groups that share a pattern prefix and middleware) so they can be
// installed into a concrete Router in one pass via Register.
type RouteRegister interface {
	// Get/Post/Delete/Put/Patch register a handler chain for the
	// corresponding HTTP method; Any matches every method.
	Get(string, ...macaron.Handler)
	Post(string, ...macaron.Handler)
	Delete(string, ...macaron.Handler)
	Put(string, ...macaron.Handler)
	Patch(string, ...macaron.Handler)
	Any(string, ...macaron.Handler)

	// Group declares routes under a shared pattern prefix; the extra
	// handlers run before each contained route's own handlers.
	Group(string, func(RouteRegister), ...macaron.Handler)

	// Register installs every accumulated route into router.
	Register(Router) *macaron.Router
}
||||
|
||||
type RegisterNamedMiddleware func(name string) macaron.Handler |
||||
|
||||
func newRouteRegister(namedMiddleware ...RegisterNamedMiddleware) RouteRegister { |
||||
return &routeRegister{ |
||||
prefix: "", |
||||
routes: []route{}, |
||||
subfixHandlers: []macaron.Handler{}, |
||||
namedMiddleware: namedMiddleware, |
||||
} |
||||
} |
||||
|
||||
// route holds one pending route registration until Register installs it.
type route struct {
	method   string
	pattern  string
	handlers []macaron.Handler
}

// routeRegister is the concrete RouteRegister. Nested groups are kept
// as child routeRegisters and flattened recursively by Register.
type routeRegister struct {
	prefix          string            // pattern prefix inherited from enclosing groups
	subfixHandlers  []macaron.Handler // handlers accumulated from enclosing groups
	namedMiddleware []RegisterNamedMiddleware
	routes          []route
	groups          []*routeRegister
}
||||
|
||||
func (rr *routeRegister) Group(pattern string, fn func(rr RouteRegister), handlers ...macaron.Handler) { |
||||
group := &routeRegister{ |
||||
prefix: rr.prefix + pattern, |
||||
subfixHandlers: append(rr.subfixHandlers, handlers...), |
||||
routes: []route{}, |
||||
namedMiddleware: rr.namedMiddleware, |
||||
} |
||||
|
||||
fn(group) |
||||
rr.groups = append(rr.groups, group) |
||||
} |
||||
|
||||
func (rr *routeRegister) Register(router Router) *macaron.Router { |
||||
for _, r := range rr.routes { |
||||
router.Handle(r.method, r.pattern, r.handlers) |
||||
} |
||||
|
||||
for _, g := range rr.groups { |
||||
g.Register(router) |
||||
} |
||||
|
||||
return &macaron.Router{} |
||||
} |
||||
|
||||
func (rr *routeRegister) route(pattern, method string, handlers ...macaron.Handler) { |
||||
//inject tracing
|
||||
|
||||
h := make([]macaron.Handler, 0) |
||||
for _, fn := range rr.namedMiddleware { |
||||
h = append(h, fn(pattern)) |
||||
} |
||||
|
||||
h = append(h, rr.subfixHandlers...) |
||||
h = append(h, handlers...) |
||||
|
||||
rr.routes = append(rr.routes, route{ |
||||
method: method, |
||||
pattern: rr.prefix + pattern, |
||||
handlers: h, |
||||
}) |
||||
} |
||||
|
||||
// Get registers a handler chain for GET requests on pattern.
func (rr *routeRegister) Get(pattern string, handlers ...macaron.Handler) {
	rr.route(pattern, http.MethodGet, handlers...)
}

// Post registers a handler chain for POST requests on pattern.
func (rr *routeRegister) Post(pattern string, handlers ...macaron.Handler) {
	rr.route(pattern, http.MethodPost, handlers...)
}

// Delete registers a handler chain for DELETE requests on pattern.
func (rr *routeRegister) Delete(pattern string, handlers ...macaron.Handler) {
	rr.route(pattern, http.MethodDelete, handlers...)
}

// Put registers a handler chain for PUT requests on pattern.
func (rr *routeRegister) Put(pattern string, handlers ...macaron.Handler) {
	rr.route(pattern, http.MethodPut, handlers...)
}

// Patch registers a handler chain for PATCH requests on pattern.
func (rr *routeRegister) Patch(pattern string, handlers ...macaron.Handler) {
	rr.route(pattern, http.MethodPatch, handlers...)
}

// Any registers a handler chain matching any HTTP method ("*") on pattern.
func (rr *routeRegister) Any(pattern string, handlers ...macaron.Handler) {
	rr.route(pattern, "*", handlers...)
}
||||
@ -0,0 +1,185 @@ |
||||
package api |
||||
|
||||
import ( |
||||
"strconv" |
||||
"testing" |
||||
|
||||
macaron "gopkg.in/macaron.v1" |
||||
) |
||||
|
||||
// fakeRouter records every registration handed to it, letting tests
// inspect exactly what RouteRegister.Register would install.
type fakeRouter struct {
	route []route
}

// Handle implements Router by appending the registration to fr.route.
func (fr *fakeRouter) Handle(method, pattern string, handlers []macaron.Handler) *macaron.Route {
	fr.route = append(fr.route, route{
		pattern:  pattern,
		method:   method,
		handlers: handlers,
	})

	return &macaron.Route{}
}
||||
|
||||
func emptyHandlers(n int) []macaron.Handler { |
||||
res := []macaron.Handler{} |
||||
for i := 1; n >= i; i++ { |
||||
res = append(res, emptyHandler(strconv.Itoa(i))) |
||||
} |
||||
return res |
||||
} |
||||
|
||||
// emptyHandler returns a uniquely-named inert value usable as a
// macaron.Handler in tests; the name makes failures distinguishable.
func emptyHandler(name string) macaron.Handler {
	return struct{ name string }{name: name}
}
||||
|
||||
// TestRouteSimpleRegister verifies that flat (ungrouped) routes are
// installed with the expected method, pattern, and handler count. The
// named middleware given to newRouteRegister adds one handler per
// route, hence the expected counts are one more than registered.
func TestRouteSimpleRegister(t *testing.T) {
	testTable := []route{
		{method: "DELETE", pattern: "/admin", handlers: emptyHandlers(2)},
		{method: "GET", pattern: "/down", handlers: emptyHandlers(3)},
	}

	// Setup
	rr := newRouteRegister(func(name string) macaron.Handler {
		return emptyHandler(name)
	})

	rr.Delete("/admin", emptyHandler("1"))
	rr.Get("/down", emptyHandler("1"), emptyHandler("2"))

	fr := &fakeRouter{}
	rr.Register(fr)

	// Validation: compare recorded registrations against the table.
	if len(fr.route) != len(testTable) {
		t.Errorf("want %v routes, got %v", len(testTable), len(fr.route))
	}

	for i := range testTable {
		if testTable[i].method != fr.route[i].method {
			t.Errorf("want %s got %v", testTable[i].method, fr.route[i].method)
		}

		if testTable[i].pattern != fr.route[i].pattern {
			t.Errorf("want %s got %v", testTable[i].pattern, fr.route[i].pattern)
		}

		if len(testTable[i].handlers) != len(fr.route[i].handlers) {
			t.Errorf("want %d handlers got %d handlers \ntestcase: %v\nroute: %v\n",
				len(testTable[i].handlers),
				len(fr.route[i].handlers),
				testTable[i],
				fr.route[i])
		}
	}
}
||||
|
||||
// TestRouteGroupedRegister verifies that nested groups prefix their
// routes' patterns and prepend their shared handlers (no named
// middleware is installed here).
func TestRouteGroupedRegister(t *testing.T) {
	testTable := []route{
		{method: "DELETE", pattern: "/admin", handlers: emptyHandlers(1)},
		{method: "GET", pattern: "/down", handlers: emptyHandlers(2)},
		{method: "POST", pattern: "/user", handlers: emptyHandlers(1)},
		{method: "PUT", pattern: "/user/friends", handlers: emptyHandlers(1)},
		{method: "DELETE", pattern: "/user/admin", handlers: emptyHandlers(2)},
		{method: "GET", pattern: "/user/admin/all", handlers: emptyHandlers(4)},
	}

	// Setup
	rr := newRouteRegister()

	rr.Delete("/admin", emptyHandler("1"))
	rr.Get("/down", emptyHandler("1"), emptyHandler("2"))

	rr.Group("/user", func(user RouteRegister) {
		user.Post("", emptyHandler("1"))
		user.Put("/friends", emptyHandler("2"))

		// Inner group: its extra handler ("3") is prepended to both routes.
		user.Group("/admin", func(admin RouteRegister) {
			admin.Delete("", emptyHandler("3"))
			admin.Get("/all", emptyHandler("3"), emptyHandler("4"), emptyHandler("5"))

		}, emptyHandler("3"))
	})

	fr := &fakeRouter{}
	rr.Register(fr)

	// Validation: compare recorded registrations against the table.
	if len(fr.route) != len(testTable) {
		t.Errorf("want %v routes, got %v", len(testTable), len(fr.route))
	}

	for i := range testTable {
		if testTable[i].method != fr.route[i].method {
			t.Errorf("want %s got %v", testTable[i].method, fr.route[i].method)
		}

		if testTable[i].pattern != fr.route[i].pattern {
			t.Errorf("want %s got %v", testTable[i].pattern, fr.route[i].pattern)
		}

		if len(testTable[i].handlers) != len(fr.route[i].handlers) {
			t.Errorf("want %d handlers got %d handlers \ntestcase: %v\nroute: %v\n",
				len(testTable[i].handlers),
				len(fr.route[i].handlers),
				testTable[i],
				fr.route[i])
		}
	}
}
||||
|
||||
// TestNamedMiddlewareRouteRegister repeats the grouped-register
// scenario with a named-middleware factory installed, so every route's
// expected handler count is one higher than in TestRouteGroupedRegister.
func TestNamedMiddlewareRouteRegister(t *testing.T) {
	testTable := []route{
		{method: "DELETE", pattern: "/admin", handlers: emptyHandlers(2)},
		{method: "GET", pattern: "/down", handlers: emptyHandlers(3)},
		{method: "POST", pattern: "/user", handlers: emptyHandlers(2)},
		{method: "PUT", pattern: "/user/friends", handlers: emptyHandlers(2)},
		{method: "DELETE", pattern: "/user/admin", handlers: emptyHandlers(3)},
		{method: "GET", pattern: "/user/admin/all", handlers: emptyHandlers(5)},
	}

	// Setup
	rr := newRouteRegister(func(name string) macaron.Handler {
		return emptyHandler(name)
	})

	rr.Delete("/admin", emptyHandler("1"))
	rr.Get("/down", emptyHandler("1"), emptyHandler("2"))

	rr.Group("/user", func(user RouteRegister) {
		user.Post("", emptyHandler("1"))
		user.Put("/friends", emptyHandler("2"))

		user.Group("/admin", func(admin RouteRegister) {
			admin.Delete("", emptyHandler("3"))
			admin.Get("/all", emptyHandler("3"), emptyHandler("4"), emptyHandler("5"))

		}, emptyHandler("3"))
	})

	fr := &fakeRouter{}
	rr.Register(fr)

	// Validation: compare recorded registrations against the table.
	if len(fr.route) != len(testTable) {
		t.Errorf("want %v routes, got %v", len(testTable), len(fr.route))
	}

	for i := range testTable {
		if testTable[i].method != fr.route[i].method {
			t.Errorf("want %s got %v", testTable[i].method, fr.route[i].method)
		}

		if testTable[i].pattern != fr.route[i].pattern {
			t.Errorf("want %s got %v", testTable[i].pattern, fr.route[i].pattern)
		}

		if len(testTable[i].handlers) != len(fr.route[i].handlers) {
			t.Errorf("want %d handlers got %d handlers \ntestcase: %v\nroute: %v\n",
				len(testTable[i].handlers),
				len(fr.route[i].handlers),
				testTable[i],
				fr.route[i])
		}
	}
}
||||
@ -0,0 +1,88 @@ |
||||
package imguploader |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"net/http" |
||||
"os" |
||||
|
||||
"github.com/grafana/grafana/pkg/log" |
||||
"github.com/grafana/grafana/pkg/util" |
||||
"golang.org/x/oauth2/google" |
||||
) |
||||
|
||||
const (
	// tokenUrl is the OAuth2 scope requested for read/write access to
	// Google Cloud Storage.
	tokenUrl string = "https://www.googleapis.com/auth/devstorage.read_write"
	// uploadUrl is the GCS media-upload endpoint; bucket and object
	// name are interpolated and the object is made publicly readable.
	uploadUrl string = "https://www.googleapis.com/upload/storage/v1/b/%s/o?uploadType=media&name=%s&predefinedAcl=publicRead"
)
||||
|
||||
// GCSUploader uploads image files to a Google Cloud Storage bucket,
// authenticating with a service-account JSON key file.
type GCSUploader struct {
	keyFile string // path to the service-account JSON key
	bucket  string // destination bucket name
	log     log.Logger
}
||||
|
||||
func NewGCSUploader(keyFile, bucket string) *GCSUploader { |
||||
return &GCSUploader{ |
||||
keyFile: keyFile, |
||||
bucket: bucket, |
||||
log: log.New("gcsuploader"), |
||||
} |
||||
} |
||||
|
||||
// Upload pushes the image at imageDiskPath to the configured bucket
// under a random 20-character ".png" object key and returns the public
// storage.googleapis.com URL for the uploaded object.
func (u *GCSUploader) Upload(ctx context.Context, imageDiskPath string) (string, error) {
	key := util.GetRandomString(20) + ".png"

	// Read the service-account key and build an OAuth2 JWT config for
	// the read/write storage scope.
	u.log.Debug("Opening key file ", u.keyFile)
	data, err := ioutil.ReadFile(u.keyFile)
	if err != nil {
		return "", err
	}

	u.log.Debug("Creating JWT conf")
	conf, err := google.JWTConfigFromJSON(data, tokenUrl)
	if err != nil {
		return "", err
	}

	// The client handles token acquisition/refresh transparently.
	u.log.Debug("Creating HTTP client")
	client := conf.Client(ctx)
	err = u.uploadFile(client, imageDiskPath, key)
	if err != nil {
		return "", err
	}

	return fmt.Sprintf("https://storage.googleapis.com/%s/%s", u.bucket, key), nil
}
||||
|
||||
func (u *GCSUploader) uploadFile(client *http.Client, imageDiskPath, key string) error { |
||||
u.log.Debug("Opening image file ", imageDiskPath) |
||||
|
||||
fileReader, err := os.Open(imageDiskPath) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
reqUrl := fmt.Sprintf(uploadUrl, u.bucket, key) |
||||
u.log.Debug("Request URL: ", reqUrl) |
||||
|
||||
req, err := http.NewRequest("POST", reqUrl, fileReader) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
req.Header.Add("Content-Type", "image/png") |
||||
u.log.Debug("Sending POST request to GCS") |
||||
|
||||
resp, err := client.Do(req) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
if resp.StatusCode != 200 { |
||||
return fmt.Errorf("GCS response status code %d", resp.StatusCode) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
@ -0,0 +1,24 @@ |
||||
package imguploader |
||||
|
||||
import ( |
||||
"context" |
||||
"testing" |
||||
|
||||
"github.com/grafana/grafana/pkg/setting" |
||||
. "github.com/smartystreets/goconvey/convey" |
||||
) |
||||
|
||||
// TestUploadToGCS is an integration test, skipped by default via
// SkipConvey, that uploads a bundled logo image through the configured
// external image store and expects a non-empty public URL back.
func TestUploadToGCS(t *testing.T) {
	SkipConvey("[Integration test] for external_image_store.gcs", t, func() {
		setting.NewConfigContext(&setting.CommandLineArgs{
			HomePath: "../../../",
		})

		// Error from NewImageUploader is deliberately ignored; Upload
		// below surfaces any misconfiguration.
		gcsUploader, _ := NewImageUploader()

		path, err := gcsUploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")

		So(err, ShouldBeNil)
		So(path, ShouldNotEqual, "")
	})
}
||||
@ -1,122 +0,0 @@ |
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics |
||||
|
||||
import ( |
||||
"math" |
||||
"sync" |
||||
"sync/atomic" |
||||
) |
||||
|
||||
// EWMAs continuously calculate an exponentially-weighted moving average
// based on an outside source of clock ticks.
type EWMA interface {
	Rate() float64
	Snapshot() EWMA
	Tick()
	Update(int64)
}

// NewEWMA constructs a new EWMA with the given alpha. When metrics are
// globally disabled (UseNilMetrics) a no-op implementation is returned.
func NewEWMA(alpha float64) EWMA {
	if UseNilMetrics {
		return NilEWMA{}
	}
	return &StandardEWMA{alpha: alpha}
}
||||
|
||||
// NewEWMA1 constructs a new EWMA for a one-minute moving average.
// The alpha values assume a five-second tick interval.
func NewEWMA1() EWMA {
	return NewEWMA(1 - math.Exp(-5.0/60.0/1))
}

// NewEWMA5 constructs a new EWMA for a five-minute moving average.
func NewEWMA5() EWMA {
	return NewEWMA(1 - math.Exp(-5.0/60.0/5))
}

// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
func NewEWMA15() EWMA {
	return NewEWMA(1 - math.Exp(-5.0/60.0/15))
}
||||
|
||||
// EWMASnapshot is a read-only copy of another EWMA.
type EWMASnapshot float64

// Rate returns the rate of events per second at the time the snapshot was
// taken.
func (a EWMASnapshot) Rate() float64 { return float64(a) }

// Snapshot returns the snapshot.
func (a EWMASnapshot) Snapshot() EWMA { return a }

// Tick panics: snapshots are immutable.
func (EWMASnapshot) Tick() {
	panic("Tick called on an EWMASnapshot")
}

// Update panics: snapshots are immutable.
func (EWMASnapshot) Update(int64) {
	panic("Update called on an EWMASnapshot")
}
||||
|
||||
// NilEWMA is a no-op EWMA, used when metrics collection is disabled.
type NilEWMA struct{}

// Rate is a no-op.
func (NilEWMA) Rate() float64 { return 0.0 }

// Snapshot is a no-op.
func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }

// Tick is a no-op.
func (NilEWMA) Tick() {}

// Update is a no-op.
func (NilEWMA) Update(n int64) {}
||||
|
||||
// StandardEWMA is the standard implementation of an EWMA and tracks the number
// of uncounted events and processes them on each tick. It uses the
// sync/atomic package to manage uncounted events.
type StandardEWMA struct {
	uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
	alpha     float64
	rate      float64
	init      bool
	mutex     sync.Mutex
}

// Rate returns the moving average rate of events per second.
// The stored rate is per-nanosecond, hence the 1e9 scaling.
func (a *StandardEWMA) Rate() float64 {
	a.mutex.Lock()
	defer a.mutex.Unlock()
	return a.rate * float64(1e9)
}

// Snapshot returns a read-only copy of the EWMA.
func (a *StandardEWMA) Snapshot() EWMA {
	return EWMASnapshot(a.Rate())
}

// Tick ticks the clock to update the moving average. It assumes it is called
// every five seconds.
func (a *StandardEWMA) Tick() {
	// Atomically drain the events accumulated since the last tick.
	count := atomic.LoadInt64(&a.uncounted)
	atomic.AddInt64(&a.uncounted, -count)
	// 5e9 ns = the assumed five-second tick interval.
	instantRate := float64(count) / float64(5e9)
	a.mutex.Lock()
	defer a.mutex.Unlock()
	if a.init {
		// Standard EWMA update: move rate toward instantRate by alpha.
		a.rate += a.alpha * (instantRate - a.rate)
	} else {
		// First tick seeds the average directly.
		a.init = true
		a.rate = instantRate
	}
}

// Update adds n uncounted events.
func (a *StandardEWMA) Update(n int64) {
	atomic.AddInt64(&a.uncounted, n)
}
||||
@ -1,46 +0,0 @@ |
||||
package metrics |
||||
|
||||
// type comboCounterRef struct {
|
||||
// *MetricMeta
|
||||
// usageCounter Counter
|
||||
// metricCounter Counter
|
||||
// }
|
||||
//
|
||||
// func RegComboCounter(name string, tagStrings ...string) Counter {
|
||||
// meta := NewMetricMeta(name, tagStrings)
|
||||
// cr := &comboCounterRef{
|
||||
// MetricMeta: meta,
|
||||
// usageCounter: NewCounter(meta),
|
||||
// metricCounter: NewCounter(meta),
|
||||
// }
|
||||
//
|
||||
// UsageStats.Register(cr.usageCounter)
|
||||
// MetricStats.Register(cr.metricCounter)
|
||||
//
|
||||
// return cr
|
||||
// }
|
||||
//
|
||||
// func (c comboCounterRef) Clear() {
|
||||
// c.usageCounter.Clear()
|
||||
// c.metricCounter.Clear()
|
||||
// }
|
||||
//
|
||||
// func (c comboCounterRef) Count() int64 {
|
||||
// panic("Count called on a combocounter ref")
|
||||
// }
|
||||
//
|
||||
// // Dec panics.
|
||||
// func (c comboCounterRef) Dec(i int64) {
|
||||
// c.usageCounter.Dec(i)
|
||||
// c.metricCounter.Dec(i)
|
||||
// }
|
||||
//
|
||||
// // Inc panics.
|
||||
// func (c comboCounterRef) Inc(i int64) {
|
||||
// c.usageCounter.Inc(i)
|
||||
// c.metricCounter.Inc(i)
|
||||
// }
|
||||
//
|
||||
// func (c comboCounterRef) Snapshot() Metric {
|
||||
// return c.metricCounter.Snapshot()
|
||||
// }
|
||||
@ -1,61 +0,0 @@ |
||||
package metrics |
||||
|
||||
import "github.com/grafana/grafana/pkg/log" |
||||
|
||||
// MetricMeta holds the name and key/value tags shared by all metric
// implementations in this package.
type MetricMeta struct {
	tags map[string]string
	name string
}
||||
|
||||
func NewMetricMeta(name string, tagStrings []string) *MetricMeta { |
||||
if len(tagStrings)%2 != 0 { |
||||
log.Fatal(3, "Metrics: tags array is missing value for key, %v", tagStrings) |
||||
} |
||||
|
||||
tags := make(map[string]string) |
||||
for i := 0; i < len(tagStrings); i += 2 { |
||||
tags[tagStrings[i]] = tagStrings[i+1] |
||||
} |
||||
|
||||
return &MetricMeta{ |
||||
tags: tags, |
||||
name: name, |
||||
} |
||||
} |
||||
|
||||
// Name returns the metric's base name, without tags.
func (m *MetricMeta) Name() string {
	return m.name
}
||||
|
||||
func (m *MetricMeta) GetTagsCopy() map[string]string { |
||||
if len(m.tags) == 0 { |
||||
return make(map[string]string) |
||||
} |
||||
|
||||
copy := make(map[string]string) |
||||
for k2, v2 := range m.tags { |
||||
copy[k2] = v2 |
||||
} |
||||
|
||||
return copy |
||||
} |
||||
|
||||
func (m *MetricMeta) StringifyTags() string { |
||||
if len(m.tags) == 0 { |
||||
return "" |
||||
} |
||||
|
||||
str := "" |
||||
for key, value := range m.tags { |
||||
str += "." + key + "_" + value |
||||
} |
||||
|
||||
return str |
||||
} |
||||
|
||||
// Metric is the common contract for all metric types in this package:
// identity (name + tags) plus a point-in-time read-only Snapshot.
type Metric interface {
	Name() string
	GetTagsCopy() map[string]string
	StringifyTags() string
	Snapshot() Metric
}
||||
@ -1,61 +0,0 @@ |
||||
package metrics |
||||
|
||||
import "sync/atomic" |
||||
|
||||
// Counters hold an int64 value that can be incremented and decremented.
type Counter interface {
	Metric

	Clear()
	Count() int64
	Dec(int64)
	Inc(int64)
}

// NewCounter constructs a new StandardCounter with the given metadata.
func NewCounter(meta *MetricMeta) Counter {
	return &StandardCounter{
		MetricMeta: meta,
		count:      0,
	}
}
||||
|
||||
// RegCounter creates a counter from a name plus alternating tag
// key/value strings and registers it with MetricStats.
func RegCounter(name string, tagStrings ...string) Counter {
	cr := NewCounter(NewMetricMeta(name, tagStrings))
	MetricStats.Register(cr)
	return cr
}
||||
|
||||
// StandardCounter is the standard implementation of a Counter and uses the
// sync/atomic package to manage a single int64 value.
type StandardCounter struct {
	count int64 //Due to a bug in golang the 64bit variable need to come first to be 64bit aligned. https://golang.org/pkg/sync/atomic/#pkg-note-BUG
	*MetricMeta
}

// Clear sets the counter to zero.
func (c *StandardCounter) Clear() {
	atomic.StoreInt64(&c.count, 0)
}

// Count returns the current count.
func (c *StandardCounter) Count() int64 {
	return atomic.LoadInt64(&c.count)
}

// Dec decrements the counter by the given amount.
func (c *StandardCounter) Dec(i int64) {
	atomic.AddInt64(&c.count, -i)
}

// Inc increments the counter by the given amount.
func (c *StandardCounter) Inc(i int64) {
	atomic.AddInt64(&c.count, i)
}
||||
|
||||
func (c *StandardCounter) Snapshot() Metric { |
||||
return &StandardCounter{ |
||||
MetricMeta: c.MetricMeta, |
||||
count: c.count, |
||||
} |
||||
} |
||||
@ -1,11 +0,0 @@ |
||||
package metrics |
||||
|
||||
import "math" |
||||
|
||||
// calculateDelta returns the number of events between two successive
// counter readings, accounting for the counter wrapping past
// math.MaxInt64 (oldValue >= newValue is treated as a wrap).
func calculateDelta(oldValue, newValue int64) int64 {
	if oldValue < newValue {
		return newValue - oldValue
	}
	// Wrapped: distance from oldValue up to MaxInt64, plus distance
	// from MinInt64 up to newValue, plus one for the wrap step itself.
	// (Idiomatic early return replaces the previous else-after-return.)
	return (math.MaxInt64 - oldValue) + (newValue - math.MinInt64) + 1
}
||||
@ -1,83 +0,0 @@ |
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics |
||||
|
||||
import "sync/atomic" |
||||
|
||||
// Gauges hold an int64 value that can be set arbitrarily.
type Gauge interface {
	Metric

	Update(int64)
	Value() int64
}

// NewGauge constructs a StandardGauge, or a no-op NilGauge when
// metrics are globally disabled (UseNilMetrics).
func NewGauge(meta *MetricMeta) Gauge {
	if UseNilMetrics {
		return NilGauge{}
	}
	return &StandardGauge{
		MetricMeta: meta,
		value:      0,
	}
}

// RegGauge creates a gauge from a name plus alternating tag key/value
// strings and registers it with MetricStats.
func RegGauge(name string, tagStrings ...string) Gauge {
	tr := NewGauge(NewMetricMeta(name, tagStrings))
	MetricStats.Register(tr)
	return tr
}
||||
|
||||
// GaugeSnapshot is a read-only copy of another Gauge.
type GaugeSnapshot struct {
	value int64
	*MetricMeta
}

// Snapshot returns the snapshot.
func (g GaugeSnapshot) Snapshot() Metric { return g }

// Update panics: snapshots are immutable.
func (GaugeSnapshot) Update(int64) {
	panic("Update called on a GaugeSnapshot")
}

// Value returns the value at the time the snapshot was taken.
func (g GaugeSnapshot) Value() int64 { return g.value }
||||
|
||||
// NilGauge is a no-op Gauge, used when metrics collection is disabled.
type NilGauge struct{ *MetricMeta }

// Snapshot is a no-op.
func (NilGauge) Snapshot() Metric { return NilGauge{} }

// Update is a no-op.
func (NilGauge) Update(v int64) {}

// Value is a no-op.
func (NilGauge) Value() int64 { return 0 }
||||
|
||||
// StandardGauge is the standard implementation of a Gauge and uses the
// sync/atomic package to manage a single int64 value.
// atomic needs 64-bit aligned memory which is ensured by keeping the
// int64 as the first field.
type StandardGauge struct {
	value int64
	*MetricMeta
}
||||
|
||||
// Snapshot returns a read-only copy of the gauge.
|
||||
func (g *StandardGauge) Snapshot() Metric { |
||||
return GaugeSnapshot{MetricMeta: g.MetricMeta, value: g.value} |
||||
} |
||||
|
||||
// Update updates the gauge's value.
func (g *StandardGauge) Update(v int64) {
	atomic.StoreInt64(&g.value, v)
}

// Value returns the gauge's current value.
func (g *StandardGauge) Value() int64 {
	return atomic.LoadInt64(&g.value)
}
||||
@ -1,107 +0,0 @@ |
||||
package metrics |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"net" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/grafana/grafana/pkg/log" |
||||
"github.com/grafana/grafana/pkg/setting" |
||||
) |
||||
|
||||
// GraphitePublisher pushes metric snapshots to a Graphite server using
// the plaintext line protocol. prevCounts remembers the last published
// value per counter so deltas can be sent instead of absolute counts.
type GraphitePublisher struct {
	address    string // host:port of the Graphite server
	protocol   string // always "tcp" (set in CreateGraphitePublisher)
	prefix     string // metric path prefix with instance name substituted
	prevCounts map[string]int64
}
||||
|
||||
func CreateGraphitePublisher() (*GraphitePublisher, error) { |
||||
graphiteSection, err := setting.Cfg.GetSection("metrics.graphite") |
||||
if err != nil { |
||||
return nil, nil |
||||
} |
||||
|
||||
address := graphiteSection.Key("address").String() |
||||
if address == "" { |
||||
return nil, nil |
||||
} |
||||
|
||||
publisher := &GraphitePublisher{} |
||||
publisher.prevCounts = make(map[string]int64) |
||||
publisher.protocol = "tcp" |
||||
publisher.prefix = graphiteSection.Key("prefix").MustString("prod.grafana.%(instance_name)s") |
||||
publisher.address = address |
||||
|
||||
safeInstanceName := strings.Replace(setting.InstanceName, ".", "_", -1) |
||||
prefix := graphiteSection.Key("prefix").Value() |
||||
|
||||
if prefix == "" { |
||||
prefix = "prod.grafana.%(instance_name)s." |
||||
} |
||||
|
||||
publisher.prefix = strings.Replace(prefix, "%(instance_name)s", safeInstanceName, -1) |
||||
return publisher, nil |
||||
} |
||||
|
||||
func (this *GraphitePublisher) Publish(metrics []Metric) { |
||||
conn, err := net.DialTimeout(this.protocol, this.address, time.Second*5) |
||||
|
||||
if err != nil { |
||||
log.Error(3, "Metrics: GraphitePublisher: Failed to connect to %s!", err) |
||||
return |
||||
} |
||||
|
||||
buf := bytes.NewBufferString("") |
||||
now := time.Now().Unix() |
||||
|
||||
for _, m := range metrics { |
||||
metricName := this.prefix + m.Name() + m.StringifyTags() |
||||
|
||||
switch metric := m.(type) { |
||||
case Counter: |
||||
this.addCount(buf, metricName+".count", metric.Count(), now) |
||||
case Gauge: |
||||
this.addCount(buf, metricName, metric.Value(), now) |
||||
case Timer: |
||||
percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99}) |
||||
this.addCount(buf, metricName+".count", metric.Count(), now) |
||||
this.addInt(buf, metricName+".max", metric.Max(), now) |
||||
this.addInt(buf, metricName+".min", metric.Min(), now) |
||||
this.addFloat(buf, metricName+".mean", metric.Mean(), now) |
||||
this.addFloat(buf, metricName+".std", metric.StdDev(), now) |
||||
this.addFloat(buf, metricName+".p25", percentiles[0], now) |
||||
this.addFloat(buf, metricName+".p75", percentiles[1], now) |
||||
this.addFloat(buf, metricName+".p90", percentiles[2], now) |
||||
this.addFloat(buf, metricName+".p99", percentiles[3], now) |
||||
} |
||||
} |
||||
|
||||
log.Trace("Metrics: GraphitePublisher.Publish() \n%s", buf) |
||||
_, err = conn.Write(buf.Bytes()) |
||||
|
||||
if err != nil { |
||||
log.Error(3, "Metrics: GraphitePublisher: Failed to send metrics! %s", err) |
||||
} |
||||
} |
||||
|
||||
// addInt appends a "metric value timestamp" plaintext line for an
// integer value.
func (this *GraphitePublisher) addInt(buf *bytes.Buffer, metric string, value int64, now int64) {
	buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, value, now))
}

// addFloat appends a plaintext line for a floating-point value.
func (this *GraphitePublisher) addFloat(buf *bytes.Buffer, metric string, value float64, now int64) {
	buf.WriteString(fmt.Sprintf("%s %f %d\n", metric, value, now))
}

// addCount appends the delta since the previously published value for
// metric (the raw value on first publish), then records the current
// value for the next round.
func (this *GraphitePublisher) addCount(buf *bytes.Buffer, metric string, value int64, now int64) {
	delta := value

	if last, ok := this.prevCounts[metric]; ok {
		delta = calculateDelta(last, value)
	}

	this.prevCounts[metric] = value
	buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, delta, now))
}
||||
@ -1,77 +0,0 @@ |
||||
package metrics |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/grafana/grafana/pkg/setting" |
||||
|
||||
. "github.com/smartystreets/goconvey/convey" |
||||
) |
||||
|
||||
// TestGraphitePublisher exercises CreateGraphitePublisher's prefix
// handling: explicit prefix with instance-name substitution, the
// default prefix, and the unconfigured case (nil publisher).
func TestGraphitePublisher(t *testing.T) {

	setting.CustomInitPath = "conf/does_not_exist.ini"

	Convey("Test graphite prefix replacement", t, func() {
		var err error
		err = setting.NewConfigContext(&setting.CommandLineArgs{
			HomePath: "../../",
		})

		So(err, ShouldBeNil)

		sec, err := setting.Cfg.NewSection("metrics.graphite")
		sec.NewKey("prefix", "prod.grafana.%(instance_name)s.")
		sec.NewKey("address", "localhost:2001")

		So(err, ShouldBeNil)

		// Dots in the instance name must become underscores in the prefix.
		setting.InstanceName = "hostname.with.dots.com"
		publisher, err := CreateGraphitePublisher()

		So(err, ShouldBeNil)
		So(publisher, ShouldNotBeNil)

		So(publisher.prefix, ShouldEqual, "prod.grafana.hostname_with_dots_com.")
		So(publisher.address, ShouldEqual, "localhost:2001")
	})

	Convey("Test graphite publisher default prefix", t, func() {
		var err error
		err = setting.NewConfigContext(&setting.CommandLineArgs{
			HomePath: "../../",
		})

		So(err, ShouldBeNil)

		// No explicit prefix key: the default should be applied.
		sec, err := setting.Cfg.NewSection("metrics.graphite")
		sec.NewKey("address", "localhost:2001")

		So(err, ShouldBeNil)

		setting.InstanceName = "hostname.with.dots.com"
		publisher, err := CreateGraphitePublisher()

		So(err, ShouldBeNil)
		So(publisher, ShouldNotBeNil)

		So(publisher.prefix, ShouldEqual, "prod.grafana.hostname_with_dots_com.")
		So(publisher.address, ShouldEqual, "localhost:2001")
	})

	Convey("Test graphite publisher default values", t, func() {
		var err error
		err = setting.NewConfigContext(&setting.CommandLineArgs{
			HomePath: "../../",
		})

		So(err, ShouldBeNil)

		// Section present but no address: publisher should be nil.
		_, err = setting.Cfg.NewSection("metrics.graphite")

		publisher, err := CreateGraphitePublisher()

		So(err, ShouldBeNil)
		So(publisher, ShouldBeNil)
	})
}
||||
@ -0,0 +1,396 @@ |
||||
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package graphite provides a bridge to push Prometheus metrics to a Graphite
|
||||
// server.
|
||||
package graphitebridge |
||||
|
||||
import ( |
||||
"bufio" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"net" |
||||
"sort" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/prometheus/common/expfmt" |
||||
"github.com/prometheus/common/model" |
||||
"golang.org/x/net/context" |
||||
|
||||
dto "github.com/prometheus/client_model/go" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
) |
||||
|
||||
const (
	// defaultInterval is the default push interval, and is also reused as
	// the default dial timeout (see NewBridge).
	defaultInterval = 15 * time.Second
	// millisecondsPerSecond converts sample timestamps (milliseconds) to
	// the Unix-seconds resolution Graphite expects.
	millisecondsPerSecond = 1000
)
||||
|
||||
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int

// These constants cause handlers serving metrics to behave as described if
// errors are encountered.
const (
	// ContinueOnError ignores errors and tries to push as many metrics to
	// Graphite as possible.
	ContinueOnError HandlerErrorHandling = iota

	// AbortOnError aborts the push to Graphite upon the first error
	// encountered.
	AbortOnError
)
||||
|
||||
// metricCategoryPrefix lists metric-name prefixes whose trailing underscore
// is rewritten into a path separator (e.g. "page_response" becomes
// "page.response" in the Graphite path — see writeMetric).
var metricCategoryPrefix []string = []string{"proxy_", "api_", "page_", "alerting_", "aws_", "db_", "stat_", "go_", "process_"}

// trimMetricPrefix lists prefixes stripped entirely from metric names.
var trimMetricPrefix []string = []string{"grafana_"}
||||
|
||||
// Config defines the Graphite bridge config.
type Config struct {
	// The url to push data to. Required.
	URL string

	// The prefix for the pushed Graphite metrics. Defaults to empty string.
	Prefix string

	// The interval to use for pushing data to Graphite. Defaults to 15 seconds.
	Interval time.Duration

	// The timeout for pushing metrics to Graphite. Defaults to 15 seconds.
	Timeout time.Duration

	// The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer.
	Gatherer prometheus.Gatherer

	// The logger that messages are written to. Defaults to no logging.
	Logger Logger

	// ErrorHandling defines how errors are handled. Note that errors are
	// logged regardless of the configured ErrorHandling provided Logger
	// is not nil.
	ErrorHandling HandlerErrorHandling

	// Graphite does not support ever-increasing counters the same way
	// Prometheus does: rollups and ingestion may not be able to handle
	// them. When enabled, the bridge keeps the last sent counter value in
	// memory and subtracts it from the collected value before sending, so
	// only the delta is pushed.
	CountersAsDelta bool
}
||||
|
||||
// Bridge pushes metrics to the configured Graphite server.
type Bridge struct {
	url    string
	prefix string
	// NOTE(review): field name has a typo ("Detlas" for "Deltas"); kept
	// as-is because other methods in this package reference it.
	countersAsDetlas bool
	interval         time.Duration
	timeout          time.Duration

	errorHandling HandlerErrorHandling
	logger        Logger

	// g gathers the metric families to push; see Config.Gatherer.
	g prometheus.Gatherer

	// lastValue records the previously pushed value per metric
	// fingerprint, used to compute deltas when countersAsDetlas is set.
	lastValue map[model.Fingerprint]float64
}
||||
|
||||
// Logger is the minimal interface Bridge needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy to implement by custom loggers, if they don't do so already anyway.
type Logger interface {
	Println(v ...interface{})
}
||||
|
||||
// NewBridge returns a pointer to a new Bridge struct.
|
||||
func NewBridge(c *Config) (*Bridge, error) { |
||||
b := &Bridge{} |
||||
|
||||
if c.URL == "" { |
||||
return nil, errors.New("missing URL") |
||||
} |
||||
b.url = c.URL |
||||
|
||||
if c.Gatherer == nil { |
||||
b.g = prometheus.DefaultGatherer |
||||
} else { |
||||
b.g = c.Gatherer |
||||
} |
||||
|
||||
if c.Logger != nil { |
||||
b.logger = c.Logger |
||||
} |
||||
|
||||
if c.Prefix != "" { |
||||
b.prefix = c.Prefix |
||||
} |
||||
|
||||
var z time.Duration |
||||
if c.Interval == z { |
||||
b.interval = defaultInterval |
||||
} else { |
||||
b.interval = c.Interval |
||||
} |
||||
|
||||
if c.Timeout == z { |
||||
b.timeout = defaultInterval |
||||
} else { |
||||
b.timeout = c.Timeout |
||||
} |
||||
|
||||
b.errorHandling = c.ErrorHandling |
||||
b.lastValue = map[model.Fingerprint]float64{} |
||||
b.countersAsDetlas = c.CountersAsDelta |
||||
|
||||
return b, nil |
||||
} |
||||
|
||||
// Run starts the event loop that pushes Prometheus metrics to Graphite at the
|
||||
// configured interval.
|
||||
func (b *Bridge) Run(ctx context.Context) { |
||||
ticker := time.NewTicker(b.interval) |
||||
defer ticker.Stop() |
||||
for { |
||||
select { |
||||
case <-ticker.C: |
||||
if err := b.Push(); err != nil && b.logger != nil { |
||||
b.logger.Println("error pushing to Graphite:", err) |
||||
} |
||||
case <-ctx.Done(): |
||||
return |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Push gathers metrics from the configured Gatherer and pushes them to the
// Graphite server over a fresh TCP connection.
//
// Gather errors are handled per b.errorHandling: AbortOnError returns the
// error immediately; ContinueOnError logs it (if a logger is set) and
// pushes whatever was gathered.
func (b *Bridge) Push() error {
	mfs, err := b.g.Gather()
	if err != nil || len(mfs) == 0 {
		switch b.errorHandling {
		case AbortOnError:
			// NOTE(review): when Gather succeeds but returns zero
			// families, err is nil here, so the push is silently
			// skipped — presumably intentional; confirm.
			return err
		case ContinueOnError:
			if b.logger != nil {
				b.logger.Println("continue on error:", err)
			}
		default:
			panic("unrecognized error handling value")
		}
	}

	conn, err := net.DialTimeout("tcp", b.url, b.timeout)
	if err != nil {
		return err
	}
	defer conn.Close()

	return b.writeMetrics(conn, mfs, b.prefix, model.Now())
}
||||
|
||||
func (b *Bridge) writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error { |
||||
for _, mf := range mfs { |
||||
vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{ |
||||
Timestamp: now, |
||||
}, mf) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
buf := bufio.NewWriter(w) |
||||
for _, s := range vec { |
||||
if err := writePrefix(buf, prefix); err != nil { |
||||
return err |
||||
} |
||||
|
||||
if err := writeMetric(buf, s.Metric, mf); err != nil { |
||||
return err |
||||
} |
||||
|
||||
value := b.replaceCounterWithDelta(mf, s.Metric, s.Value) |
||||
if _, err := fmt.Fprintf(buf, " %g %d\n", value, int64(s.Timestamp)/millisecondsPerSecond); err != nil { |
||||
return err |
||||
} |
||||
if err := buf.Flush(); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// writeMetric writes the Graphite path for one sample: the (possibly
// rewritten) metric name followed by ".label.value" pairs in sorted order,
// plus a rollup suffix (".count"/".sum") where applicable.
func writeMetric(buf *bufio.Writer, m model.Metric, mf *dto.MetricFamily) error {
	metricName, hasName := m[model.MetricNameLabel]
	numLabels := len(m) - 1
	if !hasName {
		numLabels = len(m)
	}

	// Rewrite a known category prefix's trailing underscore into a space;
	// writeSanitized later turns that space into a '.', so e.g.
	// "page_response" ends up as the Graphite path "page.response".
	for _, v := range metricCategoryPrefix {
		if strings.HasPrefix(string(metricName), v) {
			group := strings.Replace(v, "_", " ", 1)
			metricName = model.LabelValue(strings.Replace(string(metricName), v, group, 1))
		}
	}

	// Strip the "grafana_" namespace prefix entirely.
	for _, v := range trimMetricPrefix {
		if strings.HasPrefix(string(metricName), v) {
			metricName = model.LabelValue(strings.Replace(string(metricName), v, "", 1))
		}
	}

	labelStrings := make([]string, 0, numLabels)
	for label, value := range m {
		if label != model.MetricNameLabel {
			labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value)))
		}
	}

	var err error
	switch numLabels {
	case 0:
		if hasName {
			if err := writeSanitized(buf, string(metricName)); err != nil {
				return err
			}
		}
	default:
		// Sort for a deterministic label order in the Graphite path.
		sort.Strings(labelStrings)
		if err = writeSanitized(buf, string(metricName)); err != nil {
			return err
		}
		for _, s := range labelStrings {
			if err = buf.WriteByte('.'); err != nil {
				return err
			}
			if err = writeSanitized(buf, s); err != nil {
				return err
			}
		}
	}

	if err = addExtentionConventionForRollups(buf, mf, m); err != nil {
		return err
	}

	return nil
}
||||
|
||||
func addExtentionConventionForRollups(buf *bufio.Writer, mf *dto.MetricFamily, m model.Metric) error { |
||||
// Adding `.count` `.sum` suffix makes it possible to configure
|
||||
// different rollup strategies based on metric type
|
||||
|
||||
mfType := mf.GetType() |
||||
var err error |
||||
if mfType == dto.MetricType_COUNTER { |
||||
if _, err = fmt.Fprint(buf, ".count"); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
if mfType == dto.MetricType_SUMMARY || mfType == dto.MetricType_HISTOGRAM { |
||||
if strings.HasSuffix(string(m[model.MetricNameLabel]), "_count") { |
||||
if _, err = fmt.Fprint(buf, ".count"); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
} |
||||
if mfType == dto.MetricType_HISTOGRAM { |
||||
if strings.HasSuffix(string(m[model.MetricNameLabel]), "_sum") { |
||||
if _, err = fmt.Fprint(buf, ".sum"); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func writePrefix(buf *bufio.Writer, s string) error { |
||||
for _, c := range s { |
||||
if _, err := buf.WriteRune(replaceInvalid(c)); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func writeSanitized(buf *bufio.Writer, s string) error { |
||||
prevUnderscore := false |
||||
|
||||
for _, c := range s { |
||||
c = replaceInvalidRune(c) |
||||
if c == '_' { |
||||
if prevUnderscore { |
||||
continue |
||||
} |
||||
prevUnderscore = true |
||||
} else { |
||||
prevUnderscore = false |
||||
} |
||||
if _, err := buf.WriteRune(c); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func replaceInvalid(c rune) rune { |
||||
if c == ' ' || c == '.' { |
||||
return '.' |
||||
} |
||||
return replaceInvalidRune(c) |
||||
} |
||||
|
||||
// replaceInvalidRune maps a rune to one that is safe in a Graphite path
// component: a space becomes '.', runes in [a-zA-Z0-9_:] pass through
// unchanged, and everything else becomes '_'.
func replaceInvalidRune(c rune) rune {
	switch {
	case c == ' ':
		return '.'
	case c >= 'a' && c <= 'z',
		c >= 'A' && c <= 'Z',
		c >= '0' && c <= '9',
		c == '_', c == ':':
		return c
	default:
		return '_'
	}
}
||||
|
||||
// replaceCounterWithDelta returns the value to send to Graphite for one
// sample: the raw value when delta mode is off, otherwise the delta since
// the previous push for counters and for summary "_count" series.
func (b *Bridge) replaceCounterWithDelta(mf *dto.MetricFamily, metric model.Metric, value model.SampleValue) float64 {
	if !b.countersAsDetlas {
		return float64(value)
	}

	mfType := mf.GetType()
	if mfType == dto.MetricType_COUNTER {
		return b.returnDelta(metric, value)
	}

	if mfType == dto.MetricType_SUMMARY {
		// Only the cumulative "_count" series of a summary is treated as
		// a counter. NOTE(review): histogram "_count"/"_bucket" series
		// are also cumulative but are deliberately left as raw values —
		// this is pinned by TestWriteHistogram in this package.
		if strings.HasSuffix(string(metric[model.MetricNameLabel]), "_count") {
			return b.returnDelta(metric, value)
		}
	}

	return float64(value)
}
||||
|
||||
func (b *Bridge) returnDelta(metric model.Metric, value model.SampleValue) float64 { |
||||
key := metric.Fingerprint() |
||||
_, exists := b.lastValue[key] |
||||
if !exists { |
||||
b.lastValue[key] = 0 |
||||
} |
||||
|
||||
delta := float64(value) - b.lastValue[key] |
||||
b.lastValue[key] = float64(value) |
||||
|
||||
return delta |
||||
} |
||||
@ -0,0 +1,503 @@ |
||||
package graphitebridge |
||||
|
||||
import ( |
||||
"bufio" |
||||
"bytes" |
||||
"io" |
||||
"net" |
||||
"regexp" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
dto "github.com/prometheus/client_model/go" |
||||
"github.com/prometheus/common/model" |
||||
) |
||||
|
||||
func TestCountersAsDelta(t *testing.T) { |
||||
b, _ := NewBridge(&Config{ |
||||
URL: "localhost:12345", |
||||
CountersAsDelta: true, |
||||
}) |
||||
ty := dto.MetricType(0) |
||||
mf := &dto.MetricFamily{ |
||||
Type: &ty, |
||||
Metric: []*dto.Metric{}, |
||||
} |
||||
m := model.Metric{} |
||||
|
||||
var want float64 |
||||
var got float64 |
||||
want = float64(1) |
||||
got = b.replaceCounterWithDelta(mf, m, model.SampleValue(1)) |
||||
if got != want { |
||||
t.Fatalf("want %v got %v", want, got) |
||||
} |
||||
|
||||
got = b.replaceCounterWithDelta(mf, m, model.SampleValue(2)) |
||||
if got != want { |
||||
t.Fatalf("want %v got %v", want, got) |
||||
} |
||||
} |
||||
|
||||
func TestCountersAsDeltaDisabled(t *testing.T) { |
||||
b, _ := NewBridge(&Config{ |
||||
URL: "localhost:12345", |
||||
CountersAsDelta: false, |
||||
}) |
||||
ty := dto.MetricType(0) |
||||
mf := &dto.MetricFamily{ |
||||
Type: &ty, |
||||
Metric: []*dto.Metric{}, |
||||
} |
||||
m := model.Metric{} |
||||
|
||||
var want float64 |
||||
var got float64 |
||||
want = float64(1) |
||||
got = b.replaceCounterWithDelta(mf, m, model.SampleValue(1)) |
||||
if got != want { |
||||
t.Fatalf("want %v got %v", want, got) |
||||
} |
||||
|
||||
want = float64(2) |
||||
got = b.replaceCounterWithDelta(mf, m, model.SampleValue(2)) |
||||
if got != want { |
||||
t.Fatalf("want %v got %v", want, got) |
||||
} |
||||
} |
||||
|
||||
func TestSanitize(t *testing.T) { |
||||
testCases := []struct { |
||||
in, out string |
||||
}{ |
||||
{in: "hello", out: "hello"}, |
||||
{in: "hE/l1o", out: "hE_l1o"}, |
||||
{in: "he,*ll(.o", out: "he_ll_o"}, |
||||
{in: "hello_there%^&", out: "hello_there_"}, |
||||
} |
||||
|
||||
var buf bytes.Buffer |
||||
w := bufio.NewWriter(&buf) |
||||
|
||||
for i, tc := range testCases { |
||||
if err := writeSanitized(w, tc.in); err != nil { |
||||
t.Fatalf("write failed: %v", err) |
||||
} |
||||
if err := w.Flush(); err != nil { |
||||
t.Fatalf("flush failed: %v", err) |
||||
} |
||||
|
||||
if want, got := tc.out, buf.String(); want != got { |
||||
t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want) |
||||
} |
||||
|
||||
buf.Reset() |
||||
} |
||||
} |
||||
|
||||
func TestSanitizePrefix(t *testing.T) { |
||||
testCases := []struct { |
||||
in, out string |
||||
}{ |
||||
{in: "service.prod.", out: "service.prod."}, |
||||
{in: "service.prod", out: "service.prod"}, |
||||
} |
||||
|
||||
var buf bytes.Buffer |
||||
w := bufio.NewWriter(&buf) |
||||
|
||||
for i, tc := range testCases { |
||||
if err := writePrefix(w, tc.in); err != nil { |
||||
t.Fatalf("write failed: %v", err) |
||||
} |
||||
if err := w.Flush(); err != nil { |
||||
t.Fatalf("flush failed: %v", err) |
||||
} |
||||
|
||||
if want, got := tc.out, buf.String(); want != got { |
||||
t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want) |
||||
} |
||||
|
||||
buf.Reset() |
||||
} |
||||
} |
||||
|
||||
func TestWriteSummary(t *testing.T) { |
||||
sumVec := prometheus.NewSummaryVec( |
||||
prometheus.SummaryOpts{ |
||||
Name: "name", |
||||
Help: "docstring", |
||||
ConstLabels: prometheus.Labels{"constname": "constvalue"}, |
||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, |
||||
}, |
||||
[]string{"labelname"}, |
||||
) |
||||
|
||||
reg := prometheus.NewRegistry() |
||||
reg.MustRegister(sumVec) |
||||
|
||||
b, err := NewBridge(&Config{ |
||||
URL: "localhost:8080", |
||||
Gatherer: reg, |
||||
CountersAsDelta: true, |
||||
}) |
||||
if err != nil { |
||||
t.Fatalf("cannot create bridge. err: %v", err) |
||||
} |
||||
|
||||
sumVec.WithLabelValues("val1").Observe(float64(10)) |
||||
sumVec.WithLabelValues("val1").Observe(float64(20)) |
||||
sumVec.WithLabelValues("val1").Observe(float64(30)) |
||||
sumVec.WithLabelValues("val2").Observe(float64(20)) |
||||
sumVec.WithLabelValues("val2").Observe(float64(30)) |
||||
sumVec.WithLabelValues("val2").Observe(float64(40)) |
||||
|
||||
mfs, err := reg.Gather() |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
now := model.Time(1477043083) |
||||
var buf bytes.Buffer |
||||
err = b.writeMetrics(&buf, mfs, "prefix.", now) |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
want := `prefix.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043 |
||||
prefix.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043 |
||||
prefix.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043 |
||||
prefix.name_sum.constname.constvalue.labelname.val1 60 1477043 |
||||
prefix.name_count.constname.constvalue.labelname.val1.count 3 1477043 |
||||
prefix.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043 |
||||
prefix.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043 |
||||
prefix.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043 |
||||
prefix.name_sum.constname.constvalue.labelname.val2 90 1477043 |
||||
prefix.name_count.constname.constvalue.labelname.val2.count 3 1477043 |
||||
` |
||||
|
||||
if got := buf.String(); want != got { |
||||
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) |
||||
} |
||||
} |
||||
|
||||
func TestWriteHistogram(t *testing.T) { |
||||
histVec := prometheus.NewHistogramVec( |
||||
prometheus.HistogramOpts{ |
||||
Name: "name", |
||||
Help: "docstring", |
||||
ConstLabels: prometheus.Labels{"constname": "constvalue"}, |
||||
Buckets: []float64{0.01, 0.02, 0.05, 0.1}, |
||||
}, |
||||
[]string{"labelname"}, |
||||
) |
||||
|
||||
reg := prometheus.NewRegistry() |
||||
reg.MustRegister(histVec) |
||||
|
||||
b, err := NewBridge(&Config{ |
||||
URL: "localhost:8080", |
||||
Gatherer: reg, |
||||
CountersAsDelta: true, |
||||
}) |
||||
if err != nil { |
||||
t.Fatalf("error creating bridge: %v", err) |
||||
} |
||||
|
||||
histVec.WithLabelValues("val1").Observe(float64(10)) |
||||
histVec.WithLabelValues("val1").Observe(float64(20)) |
||||
histVec.WithLabelValues("val1").Observe(float64(30)) |
||||
histVec.WithLabelValues("val2").Observe(float64(20)) |
||||
histVec.WithLabelValues("val2").Observe(float64(30)) |
||||
histVec.WithLabelValues("val2").Observe(float64(40)) |
||||
|
||||
mfs, err := reg.Gather() |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
now := model.Time(1477043083) |
||||
var buf bytes.Buffer |
||||
err = b.writeMetrics(&buf, mfs, "prefix.", now) |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043 |
||||
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043 |
||||
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043 |
||||
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043 |
||||
prefix.name_sum.constname.constvalue.labelname.val1.sum 60 1477043 |
||||
prefix.name_count.constname.constvalue.labelname.val1.count 3 1477043 |
||||
prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043 |
||||
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043 |
||||
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043 |
||||
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043 |
||||
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043 |
||||
prefix.name_sum.constname.constvalue.labelname.val2.sum 90 1477043 |
||||
prefix.name_count.constname.constvalue.labelname.val2.count 3 1477043 |
||||
prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043 |
||||
` |
||||
if got := buf.String(); want != got { |
||||
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) |
||||
} |
||||
} |
||||
|
||||
func TestCounterVec(t *testing.T) { |
||||
cntVec := prometheus.NewCounterVec( |
||||
prometheus.CounterOpts{ |
||||
Name: "page_response", |
||||
Help: "docstring", |
||||
ConstLabels: prometheus.Labels{"constname": "constvalue"}, |
||||
}, |
||||
[]string{"labelname"}, |
||||
) |
||||
|
||||
reg := prometheus.NewRegistry() |
||||
reg.MustRegister(cntVec) |
||||
|
||||
cntVec.WithLabelValues("val1").Inc() |
||||
cntVec.WithLabelValues("val2").Inc() |
||||
|
||||
b, err := NewBridge(&Config{ |
||||
URL: "localhost:8080", |
||||
Gatherer: reg, |
||||
CountersAsDelta: true, |
||||
}) |
||||
if err != nil { |
||||
t.Fatalf("error creating bridge: %v", err) |
||||
} |
||||
|
||||
// first collect
|
||||
mfs, err := reg.Gather() |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
var buf bytes.Buffer |
||||
err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083)) |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
want := `prefix.page.response.constname.constvalue.labelname.val1.count 1 1477043 |
||||
prefix.page.response.constname.constvalue.labelname.val2.count 1 1477043 |
||||
` |
||||
if got := buf.String(); want != got { |
||||
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) |
||||
} |
||||
|
||||
//next collect
|
||||
cntVec.WithLabelValues("val1").Inc() |
||||
cntVec.WithLabelValues("val2").Inc() |
||||
|
||||
mfs, err = reg.Gather() |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
buf = bytes.Buffer{} |
||||
err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477053083)) |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
want2 := `prefix.page.response.constname.constvalue.labelname.val1.count 1 1477053 |
||||
prefix.page.response.constname.constvalue.labelname.val2.count 1 1477053 |
||||
` |
||||
if got := buf.String(); want2 != got { |
||||
t.Fatalf("wanted \n%s\n, got \n%s\n", want2, got) |
||||
} |
||||
} |
||||
|
||||
func TestCounter(t *testing.T) { |
||||
cntVec := prometheus.NewCounter( |
||||
prometheus.CounterOpts{ |
||||
Name: "page_response", |
||||
Help: "docstring", |
||||
ConstLabels: prometheus.Labels{"constname": "constvalue"}, |
||||
}) |
||||
|
||||
reg := prometheus.NewRegistry() |
||||
reg.MustRegister(cntVec) |
||||
|
||||
cntVec.Inc() |
||||
|
||||
b, err := NewBridge(&Config{ |
||||
URL: "localhost:8080", |
||||
Gatherer: reg, |
||||
CountersAsDelta: true, |
||||
}) |
||||
if err != nil { |
||||
t.Fatalf("error creating bridge: %v", err) |
||||
} |
||||
|
||||
// first collect
|
||||
mfs, err := reg.Gather() |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
var buf bytes.Buffer |
||||
err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083)) |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
want := "prefix.page.response.constname.constvalue.count 1 1477043\n" |
||||
if got := buf.String(); want != got { |
||||
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) |
||||
} |
||||
|
||||
//next collect
|
||||
cntVec.Inc() |
||||
|
||||
mfs, err = reg.Gather() |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
buf = bytes.Buffer{} |
||||
err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477053083)) |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
want2 := "prefix.page.response.constname.constvalue.count 1 1477053\n" |
||||
if got := buf.String(); want2 != got { |
||||
t.Fatalf("wanted \n%s\n, got \n%s\n", want2, got) |
||||
} |
||||
} |
||||
|
||||
func TestTrimGrafanaNamespace(t *testing.T) { |
||||
cntVec := prometheus.NewCounter( |
||||
prometheus.CounterOpts{ |
||||
Name: "grafana_http_request_total", |
||||
Help: "docstring", |
||||
ConstLabels: prometheus.Labels{"constname": "constvalue"}, |
||||
}) |
||||
|
||||
reg := prometheus.NewRegistry() |
||||
reg.MustRegister(cntVec) |
||||
|
||||
cntVec.Inc() |
||||
|
||||
b, err := NewBridge(&Config{ |
||||
URL: "localhost:8080", |
||||
Gatherer: reg, |
||||
CountersAsDelta: true, |
||||
}) |
||||
if err != nil { |
||||
t.Fatalf("error creating bridge: %v", err) |
||||
} |
||||
|
||||
// first collect
|
||||
mfs, err := reg.Gather() |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
var buf bytes.Buffer |
||||
err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083)) |
||||
if err != nil { |
||||
t.Fatalf("error: %v", err) |
||||
} |
||||
|
||||
want := "prefix.http_request_total.constname.constvalue.count 1 1477043\n" |
||||
if got := buf.String(); want != got { |
||||
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) |
||||
} |
||||
} |
||||
|
||||
func TestPush(t *testing.T) { |
||||
reg := prometheus.NewRegistry() |
||||
cntVec := prometheus.NewCounterVec( |
||||
prometheus.CounterOpts{ |
||||
Name: "name", |
||||
Help: "docstring", |
||||
ConstLabels: prometheus.Labels{"constname": "constvalue"}, |
||||
}, |
||||
[]string{"labelname"}, |
||||
) |
||||
cntVec.WithLabelValues("val1").Inc() |
||||
cntVec.WithLabelValues("val2").Inc() |
||||
reg.MustRegister(cntVec) |
||||
|
||||
host := "localhost" |
||||
port := ":56789" |
||||
b, err := NewBridge(&Config{ |
||||
URL: host + port, |
||||
Gatherer: reg, |
||||
Prefix: "prefix.", |
||||
}) |
||||
if err != nil { |
||||
t.Fatalf("error creating bridge: %v", err) |
||||
} |
||||
|
||||
nmg, err := newMockGraphite(port) |
||||
if err != nil { |
||||
t.Fatalf("error creating mock graphite: %v", err) |
||||
} |
||||
defer nmg.Close() |
||||
|
||||
err = b.Push() |
||||
if err != nil { |
||||
t.Fatalf("error pushing: %v", err) |
||||
} |
||||
|
||||
wants := []string{ |
||||
"prefix.name.constname.constvalue.labelname.val1.count 1", |
||||
"prefix.name.constname.constvalue.labelname.val2.count 1", |
||||
} |
||||
|
||||
select { |
||||
case got := <-nmg.readc: |
||||
for _, want := range wants { |
||||
matched, err := regexp.MatchString(want, got) |
||||
if err != nil { |
||||
t.Fatalf("error pushing: %v", err) |
||||
} |
||||
if !matched { |
||||
t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got) |
||||
} |
||||
} |
||||
return |
||||
case err := <-nmg.errc: |
||||
t.Fatalf("error reading push: %v", err) |
||||
case <-time.After(50 * time.Millisecond): |
||||
t.Fatalf("no result from graphite server") |
||||
} |
||||
} |
||||
|
||||
func newMockGraphite(port string) (*mockGraphite, error) { |
||||
readc := make(chan string) |
||||
errc := make(chan error) |
||||
ln, err := net.Listen("tcp", port) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
go func() { |
||||
conn, err := ln.Accept() |
||||
if err != nil { |
||||
errc <- err |
||||
} |
||||
var b bytes.Buffer |
||||
io.Copy(&b, conn) |
||||
readc <- b.String() |
||||
}() |
||||
|
||||
return &mockGraphite{ |
||||
readc: readc, |
||||
errc: errc, |
||||
Listener: ln, |
||||
}, nil |
||||
} |
||||
|
||||
// mockGraphite is a test double for a Graphite server: it embeds the
// net.Listener it serves on, reports received payloads on readc, and
// failures on errc.
type mockGraphite struct {
	readc chan string
	errc  chan error

	net.Listener
}
||||
@ -1,189 +0,0 @@ |
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics |
||||
|
||||
// Histograms calculate distribution statistics from a series of int64 values.
|
||||
type Histogram interface { |
||||
Metric |
||||
|
||||
Clear() |
||||
Count() int64 |
||||
Max() int64 |
||||
Mean() float64 |
||||
Min() int64 |
||||
Percentile(float64) float64 |
||||
Percentiles([]float64) []float64 |
||||
StdDev() float64 |
||||
Sum() int64 |
||||
Update(int64) |
||||
Variance() float64 |
||||
} |
||||
|
||||
func NewHistogram(meta *MetricMeta, s Sample) Histogram { |
||||
return &StandardHistogram{ |
||||
MetricMeta: meta, |
||||
sample: s, |
||||
} |
||||
} |
||||
|
||||
// HistogramSnapshot is a read-only copy of another Histogram.
|
||||
type HistogramSnapshot struct { |
||||
*MetricMeta |
||||
sample *SampleSnapshot |
||||
} |
||||
|
||||
// Clear panics.
|
||||
func (*HistogramSnapshot) Clear() { |
||||
panic("Clear called on a HistogramSnapshot") |
||||
} |
||||
|
||||
// Count returns the number of samples recorded at the time the snapshot was
|
||||
// taken.
|
||||
func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } |
||||
|
||||
// Max returns the maximum value in the sample at the time the snapshot was
|
||||
// taken.
|
||||
func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } |
||||
|
||||
// Mean returns the mean of the values in the sample at the time the snapshot
|
||||
// was taken.
|
||||
func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } |
||||
|
||||
// Min returns the minimum value in the sample at the time the snapshot was
|
||||
// taken.
|
||||
func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } |
||||
|
||||
// Percentile returns an arbitrary percentile of values in the sample at the
|
||||
// time the snapshot was taken.
|
||||
func (h *HistogramSnapshot) Percentile(p float64) float64 { |
||||
return h.sample.Percentile(p) |
||||
} |
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of values in the sample
|
||||
// at the time the snapshot was taken.
|
||||
func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { |
||||
return h.sample.Percentiles(ps) |
||||
} |
||||
|
||||
// Sample returns the Sample underlying the histogram.
|
||||
func (h *HistogramSnapshot) Sample() Sample { return h.sample } |
||||
|
||||
// Snapshot returns the snapshot.
|
||||
func (h *HistogramSnapshot) Snapshot() Metric { return h } |
||||
|
||||
// StdDev returns the standard deviation of the values in the sample at the
|
||||
// time the snapshot was taken.
|
||||
func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } |
||||
|
||||
// Sum returns the sum in the sample at the time the snapshot was taken.
|
||||
func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() } |
||||
|
||||
// Update panics.
|
||||
func (*HistogramSnapshot) Update(int64) { |
||||
panic("Update called on a HistogramSnapshot") |
||||
} |
||||
|
||||
// Variance returns the variance of inputs at the time the snapshot was taken.
|
||||
func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } |
||||
|
||||
// NilHistogram is a no-op Histogram.
|
||||
type NilHistogram struct { |
||||
*MetricMeta |
||||
} |
||||
|
||||
// Clear is a no-op.
|
||||
func (NilHistogram) Clear() {} |
||||
|
||||
// Count is a no-op.
|
||||
func (NilHistogram) Count() int64 { return 0 } |
||||
|
||||
// Max is a no-op.
|
||||
func (NilHistogram) Max() int64 { return 0 } |
||||
|
||||
// Mean is a no-op.
|
||||
func (NilHistogram) Mean() float64 { return 0.0 } |
||||
|
||||
// Min is a no-op.
|
||||
func (NilHistogram) Min() int64 { return 0 } |
||||
|
||||
// Percentile is a no-op.
|
||||
func (NilHistogram) Percentile(p float64) float64 { return 0.0 } |
||||
|
||||
// Percentiles is a no-op.
|
||||
func (NilHistogram) Percentiles(ps []float64) []float64 { |
||||
return make([]float64, len(ps)) |
||||
} |
||||
|
||||
// Sample is a no-op.
|
||||
func (NilHistogram) Sample() Sample { return NilSample{} } |
||||
|
||||
// Snapshot is a no-op.
|
||||
func (n NilHistogram) Snapshot() Metric { return n } |
||||
|
||||
// StdDev is a no-op.
|
||||
func (NilHistogram) StdDev() float64 { return 0.0 } |
||||
|
||||
// Sum is a no-op.
|
||||
func (NilHistogram) Sum() int64 { return 0 } |
||||
|
||||
// Update is a no-op.
|
||||
func (NilHistogram) Update(v int64) {} |
||||
|
||||
// Variance is a no-op.
|
||||
func (NilHistogram) Variance() float64 { return 0.0 } |
||||
|
||||
// StandardHistogram is the standard implementation of a Histogram and uses a
|
||||
// Sample to bound its memory use.
|
||||
type StandardHistogram struct { |
||||
*MetricMeta |
||||
sample Sample |
||||
} |
||||
|
||||
// Clear clears the histogram and its sample.
|
||||
func (h *StandardHistogram) Clear() { h.sample.Clear() } |
||||
|
||||
// Count returns the number of samples recorded since the histogram was last
|
||||
// cleared.
|
||||
func (h *StandardHistogram) Count() int64 { return h.sample.Count() } |
||||
|
||||
// Max returns the maximum value in the sample.
|
||||
func (h *StandardHistogram) Max() int64 { return h.sample.Max() } |
||||
|
||||
// Mean returns the mean of the values in the sample.
|
||||
func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } |
||||
|
||||
// Min returns the minimum value in the sample.
|
||||
func (h *StandardHistogram) Min() int64 { return h.sample.Min() } |
||||
|
||||
// Percentile returns an arbitrary percentile of the values in the sample.
|
||||
func (h *StandardHistogram) Percentile(p float64) float64 { |
||||
return h.sample.Percentile(p) |
||||
} |
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of the values in the
|
||||
// sample.
|
||||
func (h *StandardHistogram) Percentiles(ps []float64) []float64 { |
||||
return h.sample.Percentiles(ps) |
||||
} |
||||
|
||||
// Sample returns the Sample underlying the histogram.
|
||||
func (h *StandardHistogram) Sample() Sample { return h.sample } |
||||
|
||||
// Snapshot returns a read-only copy of the histogram.
|
||||
func (h *StandardHistogram) Snapshot() Metric { |
||||
return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} |
||||
} |
||||
|
||||
// StdDev returns the standard deviation of the values in the sample.
|
||||
func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } |
||||
|
||||
// Sum returns the sum in the sample.
|
||||
func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() } |
||||
|
||||
// Update samples a new value.
|
||||
func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } |
||||
|
||||
// Variance returns the variance of the values in the sample.
|
||||
func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } |
||||
@ -1,90 +0,0 @@ |
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics |
||||
|
||||
import "testing" |
||||
|
||||
func BenchmarkHistogram(b *testing.B) { |
||||
h := NewHistogram(nil, NewUniformSample(100)) |
||||
b.ResetTimer() |
||||
for i := 0; i < b.N; i++ { |
||||
h.Update(int64(i)) |
||||
} |
||||
} |
||||
|
||||
func TestHistogram10000(t *testing.T) { |
||||
h := NewHistogram(nil, NewUniformSample(100000)) |
||||
for i := 1; i <= 10000; i++ { |
||||
h.Update(int64(i)) |
||||
} |
||||
testHistogram10000(t, h) |
||||
} |
||||
|
||||
func TestHistogramEmpty(t *testing.T) { |
||||
h := NewHistogram(nil, NewUniformSample(100)) |
||||
if count := h.Count(); 0 != count { |
||||
t.Errorf("h.Count(): 0 != %v\n", count) |
||||
} |
||||
if min := h.Min(); 0 != min { |
||||
t.Errorf("h.Min(): 0 != %v\n", min) |
||||
} |
||||
if max := h.Max(); 0 != max { |
||||
t.Errorf("h.Max(): 0 != %v\n", max) |
||||
} |
||||
if mean := h.Mean(); 0.0 != mean { |
||||
t.Errorf("h.Mean(): 0.0 != %v\n", mean) |
||||
} |
||||
if stdDev := h.StdDev(); 0.0 != stdDev { |
||||
t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev) |
||||
} |
||||
ps := h.Percentiles([]float64{0.5, 0.75, 0.99}) |
||||
if 0.0 != ps[0] { |
||||
t.Errorf("median: 0.0 != %v\n", ps[0]) |
||||
} |
||||
if 0.0 != ps[1] { |
||||
t.Errorf("75th percentile: 0.0 != %v\n", ps[1]) |
||||
} |
||||
if 0.0 != ps[2] { |
||||
t.Errorf("99th percentile: 0.0 != %v\n", ps[2]) |
||||
} |
||||
} |
||||
|
||||
func TestHistogramSnapshot(t *testing.T) { |
||||
h := NewHistogram(nil, NewUniformSample(100000)) |
||||
for i := 1; i <= 10000; i++ { |
||||
h.Update(int64(i)) |
||||
} |
||||
snapshot := h.Snapshot().(Histogram) |
||||
h.Update(0) |
||||
testHistogram10000(t, snapshot) |
||||
} |
||||
|
||||
func testHistogram10000(t *testing.T, h Histogram) { |
||||
if count := h.Count(); 10000 != count { |
||||
t.Errorf("h.Count(): 10000 != %v\n", count) |
||||
} |
||||
if min := h.Min(); 1 != min { |
||||
t.Errorf("h.Min(): 1 != %v\n", min) |
||||
} |
||||
if max := h.Max(); 10000 != max { |
||||
t.Errorf("h.Max(): 10000 != %v\n", max) |
||||
} |
||||
if mean := h.Mean(); 5000.5 != mean { |
||||
t.Errorf("h.Mean(): 5000.5 != %v\n", mean) |
||||
} |
||||
if stdDev := h.StdDev(); 2886.751331514372 != stdDev { |
||||
t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev) |
||||
} |
||||
ps := h.Percentiles([]float64{0.5, 0.75, 0.99}) |
||||
if 5000.5 != ps[0] { |
||||
t.Errorf("median: 5000.5 != %v\n", ps[0]) |
||||
} |
||||
if 7500.75 != ps[1] { |
||||
t.Errorf("75th percentile: 7500.75 != %v\n", ps[1]) |
||||
} |
||||
if 9900.99 != ps[2] { |
||||
t.Errorf("99th percentile: 9900.99 != %v\n", ps[2]) |
||||
} |
||||
} |
||||
@ -0,0 +1,38 @@ |
||||
package metrics |
||||
|
||||
import ( |
||||
"context" |
||||
|
||||
ini "gopkg.in/ini.v1" |
||||
|
||||
"github.com/grafana/grafana/pkg/log" |
||||
"github.com/grafana/grafana/pkg/metrics/graphitebridge" |
||||
) |
||||
|
||||
var metricsLogger log.Logger = log.New("metrics") |
||||
|
||||
type logWrapper struct { |
||||
logger log.Logger |
||||
} |
||||
|
||||
func (lw *logWrapper) Println(v ...interface{}) { |
||||
lw.logger.Info("graphite metric bridge", v...) |
||||
} |
||||
|
||||
func Init(file *ini.File) { |
||||
cfg := ReadSettings(file) |
||||
internalInit(cfg) |
||||
} |
||||
|
||||
func internalInit(settings *MetricSettings) { |
||||
initMetricVars(settings) |
||||
|
||||
if settings.GraphiteBridgeConfig != nil { |
||||
bridge, err := graphitebridge.NewBridge(settings.GraphiteBridgeConfig) |
||||
if err != nil { |
||||
metricsLogger.Error("failed to create graphite bridge", "error", err) |
||||
} else { |
||||
go bridge.Run(context.Background()) |
||||
} |
||||
} |
||||
} |
||||
@ -1,221 +0,0 @@ |
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics |
||||
|
||||
import ( |
||||
"sync" |
||||
"time" |
||||
) |
||||
|
||||
// Meters count events to produce exponentially-weighted moving average rates
|
||||
// at one-, five-, and fifteen-minutes and a mean rate.
|
||||
type Meter interface { |
||||
Metric |
||||
|
||||
Count() int64 |
||||
Mark(int64) |
||||
Rate1() float64 |
||||
Rate5() float64 |
||||
Rate15() float64 |
||||
RateMean() float64 |
||||
} |
||||
|
||||
// NewMeter constructs a new StandardMeter and launches a goroutine.
|
||||
func NewMeter(meta *MetricMeta) Meter { |
||||
if UseNilMetrics { |
||||
return NilMeter{} |
||||
} |
||||
|
||||
m := newStandardMeter(meta) |
||||
arbiter.Lock() |
||||
defer arbiter.Unlock() |
||||
arbiter.meters = append(arbiter.meters, m) |
||||
if !arbiter.started { |
||||
arbiter.started = true |
||||
go arbiter.tick() |
||||
} |
||||
return m |
||||
} |
||||
|
||||
type MeterSnapshot struct { |
||||
*MetricMeta |
||||
count int64 |
||||
rate1, rate5, rate15, rateMean float64 |
||||
} |
||||
|
||||
// Count returns the count of events at the time the snapshot was taken.
|
||||
func (m *MeterSnapshot) Count() int64 { return m.count } |
||||
|
||||
// Mark panics.
|
||||
func (*MeterSnapshot) Mark(n int64) { |
||||
panic("Mark called on a MeterSnapshot") |
||||
} |
||||
|
||||
// Rate1 returns the one-minute moving average rate of events per second at the
|
||||
// time the snapshot was taken.
|
||||
func (m *MeterSnapshot) Rate1() float64 { return m.rate1 } |
||||
|
||||
// Rate5 returns the five-minute moving average rate of events per second at
|
||||
// the time the snapshot was taken.
|
||||
func (m *MeterSnapshot) Rate5() float64 { return m.rate5 } |
||||
|
||||
// Rate15 returns the fifteen-minute moving average rate of events per second
|
||||
// at the time the snapshot was taken.
|
||||
func (m *MeterSnapshot) Rate15() float64 { return m.rate15 } |
||||
|
||||
// RateMean returns the meter's mean rate of events per second at the time the
|
||||
// snapshot was taken.
|
||||
func (m *MeterSnapshot) RateMean() float64 { return m.rateMean } |
||||
|
||||
// Snapshot returns the snapshot.
|
||||
func (m *MeterSnapshot) Snapshot() Metric { return m } |
||||
|
||||
// NilMeter is a no-op Meter.
|
||||
type NilMeter struct{ *MetricMeta } |
||||
|
||||
// Count is a no-op.
|
||||
func (NilMeter) Count() int64 { return 0 } |
||||
|
||||
// Mark is a no-op.
|
||||
func (NilMeter) Mark(n int64) {} |
||||
|
||||
// Rate1 is a no-op.
|
||||
func (NilMeter) Rate1() float64 { return 0.0 } |
||||
|
||||
// Rate5 is a no-op.
|
||||
func (NilMeter) Rate5() float64 { return 0.0 } |
||||
|
||||
// Rate15is a no-op.
|
||||
func (NilMeter) Rate15() float64 { return 0.0 } |
||||
|
||||
// RateMean is a no-op.
|
||||
func (NilMeter) RateMean() float64 { return 0.0 } |
||||
|
||||
// Snapshot is a no-op.
|
||||
func (NilMeter) Snapshot() Metric { return NilMeter{} } |
||||
|
||||
// StandardMeter is the standard implementation of a Meter.
|
||||
type StandardMeter struct { |
||||
*MetricMeta |
||||
lock sync.RWMutex |
||||
snapshot *MeterSnapshot |
||||
a1, a5, a15 EWMA |
||||
startTime time.Time |
||||
} |
||||
|
||||
func newStandardMeter(meta *MetricMeta) *StandardMeter { |
||||
return &StandardMeter{ |
||||
MetricMeta: meta, |
||||
snapshot: &MeterSnapshot{MetricMeta: meta}, |
||||
a1: NewEWMA1(), |
||||
a5: NewEWMA5(), |
||||
a15: NewEWMA15(), |
||||
startTime: time.Now(), |
||||
} |
||||
} |
||||
|
||||
// Count returns the number of events recorded.
|
||||
func (m *StandardMeter) Count() int64 { |
||||
m.lock.RLock() |
||||
count := m.snapshot.count |
||||
m.lock.RUnlock() |
||||
return count |
||||
} |
||||
|
||||
// Mark records the occurrence of n events.
|
||||
func (m *StandardMeter) Mark(n int64) { |
||||
m.lock.Lock() |
||||
defer m.lock.Unlock() |
||||
m.snapshot.count += n |
||||
m.a1.Update(n) |
||||
m.a5.Update(n) |
||||
m.a15.Update(n) |
||||
m.updateSnapshot() |
||||
} |
||||
|
||||
// Rate1 returns the one-minute moving average rate of events per second.
|
||||
func (m *StandardMeter) Rate1() float64 { |
||||
m.lock.RLock() |
||||
rate1 := m.snapshot.rate1 |
||||
m.lock.RUnlock() |
||||
return rate1 |
||||
} |
||||
|
||||
// Rate5 returns the five-minute moving average rate of events per second.
|
||||
func (m *StandardMeter) Rate5() float64 { |
||||
m.lock.RLock() |
||||
rate5 := m.snapshot.rate5 |
||||
m.lock.RUnlock() |
||||
return rate5 |
||||
} |
||||
|
||||
// Rate15 returns the fifteen-minute moving average rate of events per second.
|
||||
func (m *StandardMeter) Rate15() float64 { |
||||
m.lock.RLock() |
||||
rate15 := m.snapshot.rate15 |
||||
m.lock.RUnlock() |
||||
return rate15 |
||||
} |
||||
|
||||
// RateMean returns the meter's mean rate of events per second.
|
||||
func (m *StandardMeter) RateMean() float64 { |
||||
m.lock.RLock() |
||||
rateMean := m.snapshot.rateMean |
||||
m.lock.RUnlock() |
||||
return rateMean |
||||
} |
||||
|
||||
// Snapshot returns a read-only copy of the meter.
|
||||
func (m *StandardMeter) Snapshot() Metric { |
||||
m.lock.RLock() |
||||
snapshot := *m.snapshot |
||||
m.lock.RUnlock() |
||||
return &snapshot |
||||
} |
||||
|
||||
func (m *StandardMeter) updateSnapshot() { |
||||
// should run with write lock held on m.lock
|
||||
snapshot := m.snapshot |
||||
snapshot.rate1 = m.a1.Rate() |
||||
snapshot.rate5 = m.a5.Rate() |
||||
snapshot.rate15 = m.a15.Rate() |
||||
snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds() |
||||
} |
||||
|
||||
func (m *StandardMeter) tick() { |
||||
m.lock.Lock() |
||||
defer m.lock.Unlock() |
||||
m.a1.Tick() |
||||
m.a5.Tick() |
||||
m.a15.Tick() |
||||
m.updateSnapshot() |
||||
} |
||||
|
||||
type meterArbiter struct { |
||||
sync.RWMutex |
||||
started bool |
||||
meters []*StandardMeter |
||||
ticker *time.Ticker |
||||
} |
||||
|
||||
var arbiter = meterArbiter{ticker: time.NewTicker(5e9)} |
||||
|
||||
// Ticks meters on the scheduled interval
|
||||
func (ma *meterArbiter) tick() { |
||||
for { |
||||
select { |
||||
case <-ma.ticker.C: |
||||
ma.tickMeters() |
||||
} |
||||
} |
||||
} |
||||
|
||||
func (ma *meterArbiter) tickMeters() { |
||||
ma.RLock() |
||||
defer ma.RUnlock() |
||||
for _, meter := range ma.meters { |
||||
meter.tick() |
||||
} |
||||
} |
||||
@ -1,151 +1,398 @@ |
||||
package metrics |
||||
|
||||
var MetricStats Registry |
||||
var UseNilMetrics bool |
||||
import ( |
||||
"bytes" |
||||
"encoding/json" |
||||
"net/http" |
||||
"runtime" |
||||
"strings" |
||||
"time" |
||||
|
||||
func init() { |
||||
// init with nil metrics
|
||||
initMetricVars(&MetricSettings{}) |
||||
} |
||||
"github.com/grafana/grafana/pkg/bus" |
||||
"github.com/grafana/grafana/pkg/models" |
||||
"github.com/grafana/grafana/pkg/plugins" |
||||
"github.com/grafana/grafana/pkg/setting" |
||||
"github.com/prometheus/client_golang/prometheus" |
||||
) |
||||
|
||||
const exporterName = "grafana" |
||||
|
||||
var ( |
||||
M_Instance_Start Counter |
||||
M_Page_Status_200 Counter |
||||
M_Page_Status_500 Counter |
||||
M_Page_Status_404 Counter |
||||
M_Page_Status_Unknown Counter |
||||
M_Api_Status_200 Counter |
||||
M_Api_Status_404 Counter |
||||
M_Api_Status_500 Counter |
||||
M_Api_Status_Unknown Counter |
||||
M_Proxy_Status_200 Counter |
||||
M_Proxy_Status_404 Counter |
||||
M_Proxy_Status_500 Counter |
||||
M_Proxy_Status_Unknown Counter |
||||
M_Api_User_SignUpStarted Counter |
||||
M_Api_User_SignUpCompleted Counter |
||||
M_Api_User_SignUpInvite Counter |
||||
M_Api_Dashboard_Save Timer |
||||
M_Api_Dashboard_Get Timer |
||||
M_Api_Dashboard_Search Timer |
||||
M_Api_Admin_User_Create Counter |
||||
M_Api_Login_Post Counter |
||||
M_Api_Login_OAuth Counter |
||||
M_Api_Org_Create Counter |
||||
M_Api_Dashboard_Snapshot_Create Counter |
||||
M_Api_Dashboard_Snapshot_External Counter |
||||
M_Api_Dashboard_Snapshot_Get Counter |
||||
M_Api_UserGroup_Create Counter |
||||
M_Api_Dashboard_Acl_Update Counter |
||||
M_Models_Dashboard_Insert Counter |
||||
M_Alerting_Result_State_Alerting Counter |
||||
M_Alerting_Result_State_Ok Counter |
||||
M_Alerting_Result_State_Paused Counter |
||||
M_Alerting_Result_State_NoData Counter |
||||
M_Alerting_Result_State_Pending Counter |
||||
M_Alerting_Notification_Sent_Slack Counter |
||||
M_Alerting_Notification_Sent_Email Counter |
||||
M_Alerting_Notification_Sent_Webhook Counter |
||||
M_Alerting_Notification_Sent_DingDing Counter |
||||
M_Alerting_Notification_Sent_PagerDuty Counter |
||||
M_Alerting_Notification_Sent_LINE Counter |
||||
M_Alerting_Notification_Sent_Victorops Counter |
||||
M_Alerting_Notification_Sent_OpsGenie Counter |
||||
M_Alerting_Notification_Sent_Telegram Counter |
||||
M_Alerting_Notification_Sent_Threema Counter |
||||
M_Alerting_Notification_Sent_Sensu Counter |
||||
M_Alerting_Notification_Sent_Pushover Counter |
||||
M_Aws_CloudWatch_GetMetricStatistics Counter |
||||
M_Aws_CloudWatch_ListMetrics Counter |
||||
M_DB_DataSource_QueryById Counter |
||||
M_Instance_Start prometheus.Counter |
||||
M_Page_Status *prometheus.CounterVec |
||||
M_Api_Status *prometheus.CounterVec |
||||
M_Proxy_Status *prometheus.CounterVec |
||||
M_Http_Request_Total *prometheus.CounterVec |
||||
M_Http_Request_Summary *prometheus.SummaryVec |
||||
|
||||
M_Api_User_SignUpStarted prometheus.Counter |
||||
M_Api_User_SignUpCompleted prometheus.Counter |
||||
M_Api_User_SignUpInvite prometheus.Counter |
||||
M_Api_Dashboard_Save prometheus.Summary |
||||
M_Api_Dashboard_Get prometheus.Summary |
||||
M_Api_Dashboard_Search prometheus.Summary |
||||
M_Api_Admin_User_Create prometheus.Counter |
||||
M_Api_Login_Post prometheus.Counter |
||||
M_Api_Login_OAuth prometheus.Counter |
||||
M_Api_Org_Create prometheus.Counter |
||||
|
||||
M_Api_Dashboard_Snapshot_Create prometheus.Counter |
||||
M_Api_Dashboard_Snapshot_External prometheus.Counter |
||||
M_Api_Dashboard_Snapshot_Get prometheus.Counter |
||||
M_Api_Dashboard_Insert prometheus.Counter |
||||
M_Alerting_Result_State *prometheus.CounterVec |
||||
M_Alerting_Notification_Sent *prometheus.CounterVec |
||||
M_Aws_CloudWatch_GetMetricStatistics prometheus.Counter |
||||
M_Aws_CloudWatch_ListMetrics prometheus.Counter |
||||
M_DB_DataSource_QueryById prometheus.Counter |
||||
|
||||
// Timers
|
||||
M_DataSource_ProxyReq_Timer Timer |
||||
M_Alerting_Execution_Time Timer |
||||
M_DataSource_ProxyReq_Timer prometheus.Summary |
||||
M_Alerting_Execution_Time prometheus.Summary |
||||
|
||||
// StatTotals
|
||||
M_Alerting_Active_Alerts Gauge |
||||
M_StatTotal_Dashboards Gauge |
||||
M_StatTotal_Users Gauge |
||||
M_StatTotal_Orgs Gauge |
||||
M_StatTotal_Playlists Gauge |
||||
M_Alerting_Active_Alerts prometheus.Gauge |
||||
M_StatTotal_Dashboards prometheus.Gauge |
||||
M_StatTotal_Users prometheus.Gauge |
||||
M_StatTotal_Orgs prometheus.Gauge |
||||
M_StatTotal_Playlists prometheus.Gauge |
||||
) |
||||
|
||||
func init() { |
||||
M_Instance_Start = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "instance_start_total", |
||||
Help: "counter for started instances", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Page_Status = prometheus.NewCounterVec( |
||||
prometheus.CounterOpts{ |
||||
Name: "page_response_status_total", |
||||
Help: "page http response status", |
||||
Namespace: exporterName, |
||||
}, |
||||
[]string{"code"}, |
||||
) |
||||
|
||||
M_Api_Status = prometheus.NewCounterVec( |
||||
prometheus.CounterOpts{ |
||||
Name: "api_response_status_total", |
||||
Help: "api http response status", |
||||
Namespace: exporterName, |
||||
}, |
||||
[]string{"code"}, |
||||
) |
||||
|
||||
M_Proxy_Status = prometheus.NewCounterVec( |
||||
prometheus.CounterOpts{ |
||||
Name: "proxy_response_status_total", |
||||
Help: "proxy http response status", |
||||
Namespace: exporterName, |
||||
}, |
||||
[]string{"code"}, |
||||
) |
||||
|
||||
M_Http_Request_Total = prometheus.NewCounterVec( |
||||
prometheus.CounterOpts{ |
||||
Name: "http_request_total", |
||||
Help: "http request counter", |
||||
}, |
||||
[]string{"handler", "statuscode", "method"}, |
||||
) |
||||
|
||||
M_Http_Request_Summary = prometheus.NewSummaryVec( |
||||
prometheus.SummaryOpts{ |
||||
Name: "http_request_duration_milleseconds", |
||||
Help: "http request summary", |
||||
}, |
||||
[]string{"handler", "statuscode", "method"}, |
||||
) |
||||
|
||||
M_Api_User_SignUpStarted = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "api_user_signup_started_total", |
||||
Help: "amount of users who started the signup flow", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_User_SignUpCompleted = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "api_user_signup_completed_total", |
||||
Help: "amount of users who completed the signup flow", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_User_SignUpInvite = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "api_user_signup_invite_total", |
||||
Help: "amount of users who have been invited", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_Dashboard_Save = prometheus.NewSummary(prometheus.SummaryOpts{ |
||||
Name: "api_dashboard_save_milleseconds", |
||||
Help: "summary for dashboard save duration", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_Dashboard_Get = prometheus.NewSummary(prometheus.SummaryOpts{ |
||||
Name: "api_dashboard_get_milleseconds", |
||||
Help: "summary for dashboard get duration", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_Dashboard_Search = prometheus.NewSummary(prometheus.SummaryOpts{ |
||||
Name: "api_dashboard_search_milleseconds", |
||||
Help: "summary for dashboard search duration", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_Admin_User_Create = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "api_admin_user_created_total", |
||||
Help: "api admin user created counter", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_Login_Post = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "api_login_post_total", |
||||
Help: "api login post counter", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_Login_OAuth = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "api_login_oauth_total", |
||||
Help: "api login oauth counter", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_Org_Create = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "api_org_create_total", |
||||
Help: "api org created counter", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_Dashboard_Snapshot_Create = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "api_dashboard_snapshot_create_total", |
||||
Help: "dashboard snapshots created", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_Dashboard_Snapshot_External = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "api_dashboard_snapshot_external_total", |
||||
Help: "external dashboard snapshots created", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_Dashboard_Snapshot_Get = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "api_dashboard_snapshot_get_total", |
||||
Help: "loaded dashboards", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Api_Dashboard_Insert = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "api_models_dashboard_insert_total", |
||||
Help: "dashboards inserted ", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Alerting_Result_State = prometheus.NewCounterVec(prometheus.CounterOpts{ |
||||
Name: "alerting_result_total", |
||||
Help: "alert execution result counter", |
||||
Namespace: exporterName, |
||||
}, []string{"state"}) |
||||
|
||||
M_Alerting_Notification_Sent = prometheus.NewCounterVec(prometheus.CounterOpts{ |
||||
Name: "alerting_notification_sent_total", |
||||
Help: "counter for how many alert notifications been sent", |
||||
Namespace: exporterName, |
||||
}, []string{"type"}) |
||||
|
||||
M_Aws_CloudWatch_GetMetricStatistics = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "aws_cloudwatch_get_metric_statistics_total", |
||||
Help: "counter for getting metric statistics from aws", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Aws_CloudWatch_ListMetrics = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "aws_cloudwatch_list_metrics_total", |
||||
Help: "counter for getting list of metrics from aws", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_DB_DataSource_QueryById = prometheus.NewCounter(prometheus.CounterOpts{ |
||||
Name: "db_datasource_query_by_id_total", |
||||
Help: "counter for getting datasource by id", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_DataSource_ProxyReq_Timer = prometheus.NewSummary(prometheus.SummaryOpts{ |
||||
Name: "api_dataproxy_request_all_milleseconds", |
||||
Help: "summary for dashboard search duration", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Alerting_Execution_Time = prometheus.NewSummary(prometheus.SummaryOpts{ |
||||
Name: "alerting_execution_time_milliseconds", |
||||
Help: "summary of alert exeuction duration", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_Alerting_Active_Alerts = prometheus.NewGauge(prometheus.GaugeOpts{ |
||||
Name: "alerting_active_alerts", |
||||
Help: "amount of active alerts", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_StatTotal_Dashboards = prometheus.NewGauge(prometheus.GaugeOpts{ |
||||
Name: "stat_totals_dashboard", |
||||
Help: "total amount of dashboards", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_StatTotal_Users = prometheus.NewGauge(prometheus.GaugeOpts{ |
||||
Name: "stat_total_users", |
||||
Help: "total amount of users", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_StatTotal_Orgs = prometheus.NewGauge(prometheus.GaugeOpts{ |
||||
Name: "stat_total_orgs", |
||||
Help: "total amount of orgs", |
||||
Namespace: exporterName, |
||||
}) |
||||
|
||||
M_StatTotal_Playlists = prometheus.NewGauge(prometheus.GaugeOpts{ |
||||
Name: "stat_total_playlists", |
||||
Help: "total amount of playlists", |
||||
Namespace: exporterName, |
||||
}) |
||||
} |
||||
|
||||
func initMetricVars(settings *MetricSettings) { |
||||
UseNilMetrics = settings.Enabled == false |
||||
MetricStats = NewRegistry() |
||||
|
||||
M_Instance_Start = RegCounter("instance_start") |
||||
|
||||
M_Page_Status_200 = RegCounter("page.resp_status", "code", "200") |
||||
M_Page_Status_500 = RegCounter("page.resp_status", "code", "500") |
||||
M_Page_Status_404 = RegCounter("page.resp_status", "code", "404") |
||||
M_Page_Status_Unknown = RegCounter("page.resp_status", "code", "unknown") |
||||
|
||||
M_Api_Status_200 = RegCounter("api.resp_status", "code", "200") |
||||
M_Api_Status_404 = RegCounter("api.resp_status", "code", "404") |
||||
M_Api_Status_500 = RegCounter("api.resp_status", "code", "500") |
||||
M_Api_Status_Unknown = RegCounter("api.resp_status", "code", "unknown") |
||||
|
||||
M_Proxy_Status_200 = RegCounter("proxy.resp_status", "code", "200") |
||||
M_Proxy_Status_404 = RegCounter("proxy.resp_status", "code", "404") |
||||
M_Proxy_Status_500 = RegCounter("proxy.resp_status", "code", "500") |
||||
M_Proxy_Status_Unknown = RegCounter("proxy.resp_status", "code", "unknown") |
||||
|
||||
M_Api_User_SignUpStarted = RegCounter("api.user.signup_started") |
||||
M_Api_User_SignUpCompleted = RegCounter("api.user.signup_completed") |
||||
M_Api_User_SignUpInvite = RegCounter("api.user.signup_invite") |
||||
|
||||
M_Api_UserGroup_Create = RegCounter("api.usergroup.create") |
||||
M_Api_Dashboard_Acl_Update = RegCounter("api.dashboard.acl.update") |
||||
|
||||
M_Api_Dashboard_Save = RegTimer("api.dashboard.save") |
||||
M_Api_Dashboard_Get = RegTimer("api.dashboard.get") |
||||
M_Api_Dashboard_Search = RegTimer("api.dashboard.search") |
||||
|
||||
M_Api_Admin_User_Create = RegCounter("api.admin.user_create") |
||||
M_Api_Login_Post = RegCounter("api.login.post") |
||||
M_Api_Login_OAuth = RegCounter("api.login.oauth") |
||||
M_Api_Org_Create = RegCounter("api.org.create") |
||||
|
||||
M_Api_Dashboard_Snapshot_Create = RegCounter("api.dashboard_snapshot.create") |
||||
M_Api_Dashboard_Snapshot_External = RegCounter("api.dashboard_snapshot.external") |
||||
M_Api_Dashboard_Snapshot_Get = RegCounter("api.dashboard_snapshot.get") |
||||
|
||||
M_Models_Dashboard_Insert = RegCounter("models.dashboard.insert") |
||||
|
||||
M_Alerting_Result_State_Alerting = RegCounter("alerting.result", "state", "alerting") |
||||
M_Alerting_Result_State_Ok = RegCounter("alerting.result", "state", "ok") |
||||
M_Alerting_Result_State_Paused = RegCounter("alerting.result", "state", "paused") |
||||
M_Alerting_Result_State_NoData = RegCounter("alerting.result", "state", "no_data") |
||||
M_Alerting_Result_State_Pending = RegCounter("alerting.result", "state", "pending") |
||||
|
||||
M_Alerting_Notification_Sent_Slack = RegCounter("alerting.notifications_sent", "type", "slack") |
||||
M_Alerting_Notification_Sent_Email = RegCounter("alerting.notifications_sent", "type", "email") |
||||
M_Alerting_Notification_Sent_Webhook = RegCounter("alerting.notifications_sent", "type", "webhook") |
||||
M_Alerting_Notification_Sent_DingDing = RegCounter("alerting.notifications_sent", "type", "dingding") |
||||
M_Alerting_Notification_Sent_PagerDuty = RegCounter("alerting.notifications_sent", "type", "pagerduty") |
||||
M_Alerting_Notification_Sent_Victorops = RegCounter("alerting.notifications_sent", "type", "victorops") |
||||
M_Alerting_Notification_Sent_OpsGenie = RegCounter("alerting.notifications_sent", "type", "opsgenie") |
||||
M_Alerting_Notification_Sent_Telegram = RegCounter("alerting.notifications_sent", "type", "telegram") |
||||
M_Alerting_Notification_Sent_Threema = RegCounter("alerting.notifications_sent", "type", "threema") |
||||
M_Alerting_Notification_Sent_Sensu = RegCounter("alerting.notifications_sent", "type", "sensu") |
||||
M_Alerting_Notification_Sent_LINE = RegCounter("alerting.notifications_sent", "type", "LINE") |
||||
M_Alerting_Notification_Sent_Pushover = RegCounter("alerting.notifications_sent", "type", "pushover") |
||||
|
||||
M_Aws_CloudWatch_GetMetricStatistics = RegCounter("aws.cloudwatch.get_metric_statistics") |
||||
M_Aws_CloudWatch_ListMetrics = RegCounter("aws.cloudwatch.list_metrics") |
||||
|
||||
M_DB_DataSource_QueryById = RegCounter("db.datasource.query_by_id") |
||||
prometheus.MustRegister( |
||||
M_Instance_Start, |
||||
M_Page_Status, |
||||
M_Api_Status, |
||||
M_Proxy_Status, |
||||
M_Http_Request_Total, |
||||
M_Http_Request_Summary, |
||||
M_Api_User_SignUpStarted, |
||||
M_Api_User_SignUpCompleted, |
||||
M_Api_User_SignUpInvite, |
||||
M_Api_Dashboard_Save, |
||||
M_Api_Dashboard_Get, |
||||
M_Api_Dashboard_Search, |
||||
M_DataSource_ProxyReq_Timer, |
||||
M_Alerting_Execution_Time, |
||||
M_Api_Admin_User_Create, |
||||
M_Api_Login_Post, |
||||
M_Api_Login_OAuth, |
||||
M_Api_Org_Create, |
||||
M_Api_Dashboard_Snapshot_Create, |
||||
M_Api_Dashboard_Snapshot_External, |
||||
M_Api_Dashboard_Snapshot_Get, |
||||
M_Api_Dashboard_Insert, |
||||
M_Alerting_Result_State, |
||||
M_Alerting_Notification_Sent, |
||||
M_Aws_CloudWatch_GetMetricStatistics, |
||||
M_Aws_CloudWatch_ListMetrics, |
||||
M_DB_DataSource_QueryById, |
||||
M_Alerting_Active_Alerts, |
||||
M_StatTotal_Dashboards, |
||||
M_StatTotal_Users, |
||||
M_StatTotal_Orgs, |
||||
M_StatTotal_Playlists) |
||||
|
||||
// Timers
|
||||
M_DataSource_ProxyReq_Timer = RegTimer("api.dataproxy.request.all") |
||||
M_Alerting_Execution_Time = RegTimer("alerting.execution_time") |
||||
go instrumentationLoop(settings) |
||||
} |
||||
|
||||
// StatTotals
|
||||
M_Alerting_Active_Alerts = RegGauge("alerting.active_alerts") |
||||
M_StatTotal_Dashboards = RegGauge("stat_totals", "stat", "dashboards") |
||||
M_StatTotal_Users = RegGauge("stat_totals", "stat", "users") |
||||
M_StatTotal_Orgs = RegGauge("stat_totals", "stat", "orgs") |
||||
M_StatTotal_Playlists = RegGauge("stat_totals", "stat", "playlists") |
||||
func instrumentationLoop(settings *MetricSettings) chan struct{} { |
||||
M_Instance_Start.Inc() |
||||
|
||||
onceEveryDayTick := time.NewTicker(time.Hour * 24) |
||||
secondTicker := time.NewTicker(time.Second * time.Duration(settings.IntervalSeconds)) |
||||
|
||||
for { |
||||
select { |
||||
case <-onceEveryDayTick.C: |
||||
sendUsageStats() |
||||
case <-secondTicker.C: |
||||
updateTotalStats() |
||||
} |
||||
} |
||||
} |
||||
|
||||
var metricPublishCounter int64 = 0 |
||||
|
||||
func updateTotalStats() { |
||||
metricPublishCounter++ |
||||
if metricPublishCounter == 1 || metricPublishCounter%10 == 0 { |
||||
statsQuery := models.GetSystemStatsQuery{} |
||||
if err := bus.Dispatch(&statsQuery); err != nil { |
||||
metricsLogger.Error("Failed to get system stats", "error", err) |
||||
return |
||||
} |
||||
|
||||
M_StatTotal_Dashboards.Set(float64(statsQuery.Result.Dashboards)) |
||||
M_StatTotal_Users.Set(float64(statsQuery.Result.Users)) |
||||
M_StatTotal_Playlists.Set(float64(statsQuery.Result.Playlists)) |
||||
M_StatTotal_Orgs.Set(float64(statsQuery.Result.Orgs)) |
||||
} |
||||
} |
||||
|
||||
func sendUsageStats() { |
||||
if !setting.ReportingEnabled { |
||||
return |
||||
} |
||||
|
||||
metricsLogger.Debug("Sending anonymous usage stats to stats.grafana.org") |
||||
|
||||
version := strings.Replace(setting.BuildVersion, ".", "_", -1) |
||||
|
||||
metrics := map[string]interface{}{} |
||||
report := map[string]interface{}{ |
||||
"version": version, |
||||
"metrics": metrics, |
||||
"os": runtime.GOOS, |
||||
"arch": runtime.GOARCH, |
||||
} |
||||
|
||||
statsQuery := models.GetSystemStatsQuery{} |
||||
if err := bus.Dispatch(&statsQuery); err != nil { |
||||
metricsLogger.Error("Failed to get system stats", "error", err) |
||||
return |
||||
} |
||||
|
||||
metrics["stats.dashboards.count"] = statsQuery.Result.Dashboards |
||||
metrics["stats.users.count"] = statsQuery.Result.Users |
||||
metrics["stats.orgs.count"] = statsQuery.Result.Orgs |
||||
metrics["stats.playlist.count"] = statsQuery.Result.Playlists |
||||
metrics["stats.plugins.apps.count"] = len(plugins.Apps) |
||||
metrics["stats.plugins.panels.count"] = len(plugins.Panels) |
||||
metrics["stats.plugins.datasources.count"] = len(plugins.DataSources) |
||||
metrics["stats.alerts.count"] = statsQuery.Result.Alerts |
||||
metrics["stats.active_users.count"] = statsQuery.Result.ActiveUsers |
||||
metrics["stats.datasources.count"] = statsQuery.Result.Datasources |
||||
|
||||
dsStats := models.GetDataSourceStatsQuery{} |
||||
if err := bus.Dispatch(&dsStats); err != nil { |
||||
metricsLogger.Error("Failed to get datasource stats", "error", err) |
||||
return |
||||
} |
||||
|
||||
// send counters for each data source
|
||||
// but ignore any custom data sources
|
||||
// as sending that name could be sensitive information
|
||||
dsOtherCount := 0 |
||||
for _, dsStat := range dsStats.Result { |
||||
if models.IsKnownDataSourcePlugin(dsStat.Type) { |
||||
metrics["stats.ds."+dsStat.Type+".count"] = dsStat.Count |
||||
} else { |
||||
dsOtherCount += dsStat.Count |
||||
} |
||||
} |
||||
metrics["stats.ds.other.count"] = dsOtherCount |
||||
|
||||
out, _ := json.MarshalIndent(report, "", " ") |
||||
data := bytes.NewBuffer(out) |
||||
|
||||
client := http.Client{Timeout: time.Duration(5 * time.Second)} |
||||
go client.Post("https://stats.grafana.org/grafana-usage-report", "application/json", data) |
||||
} |
||||
|
||||
@ -1,135 +0,0 @@ |
||||
package metrics |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/json" |
||||
"net/http" |
||||
"runtime" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/grafana/grafana/pkg/bus" |
||||
"github.com/grafana/grafana/pkg/log" |
||||
m "github.com/grafana/grafana/pkg/models" |
||||
"github.com/grafana/grafana/pkg/plugins" |
||||
"github.com/grafana/grafana/pkg/setting" |
||||
) |
||||
|
||||
var metricsLogger log.Logger = log.New("metrics") |
||||
var metricPublishCounter int64 = 0 |
||||
|
||||
func Init() { |
||||
settings := readSettings() |
||||
initMetricVars(settings) |
||||
go instrumentationLoop(settings) |
||||
} |
||||
|
||||
func instrumentationLoop(settings *MetricSettings) chan struct{} { |
||||
M_Instance_Start.Inc(1) |
||||
|
||||
onceEveryDayTick := time.NewTicker(time.Hour * 24) |
||||
secondTicker := time.NewTicker(time.Second * time.Duration(settings.IntervalSeconds)) |
||||
|
||||
for { |
||||
select { |
||||
case <-onceEveryDayTick.C: |
||||
sendUsageStats() |
||||
case <-secondTicker.C: |
||||
if settings.Enabled { |
||||
sendMetrics(settings) |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
func sendMetrics(settings *MetricSettings) { |
||||
if len(settings.Publishers) == 0 { |
||||
return |
||||
} |
||||
|
||||
updateTotalStats() |
||||
|
||||
metrics := MetricStats.GetSnapshots() |
||||
for _, publisher := range settings.Publishers { |
||||
publisher.Publish(metrics) |
||||
} |
||||
} |
||||
|
||||
func updateTotalStats() { |
||||
|
||||
// every interval also publish totals
|
||||
metricPublishCounter++ |
||||
if metricPublishCounter%10 == 0 { |
||||
// get stats
|
||||
statsQuery := m.GetSystemStatsQuery{} |
||||
if err := bus.Dispatch(&statsQuery); err != nil { |
||||
metricsLogger.Error("Failed to get system stats", "error", err) |
||||
return |
||||
} |
||||
|
||||
M_StatTotal_Dashboards.Update(statsQuery.Result.Dashboards) |
||||
M_StatTotal_Users.Update(statsQuery.Result.Users) |
||||
M_StatTotal_Playlists.Update(statsQuery.Result.Playlists) |
||||
M_StatTotal_Orgs.Update(statsQuery.Result.Orgs) |
||||
} |
||||
} |
||||
|
||||
func sendUsageStats() { |
||||
if !setting.ReportingEnabled { |
||||
return |
||||
} |
||||
|
||||
metricsLogger.Debug("Sending anonymous usage stats to stats.grafana.org") |
||||
|
||||
version := strings.Replace(setting.BuildVersion, ".", "_", -1) |
||||
|
||||
metrics := map[string]interface{}{} |
||||
report := map[string]interface{}{ |
||||
"version": version, |
||||
"metrics": metrics, |
||||
"os": runtime.GOOS, |
||||
"arch": runtime.GOARCH, |
||||
} |
||||
|
||||
statsQuery := m.GetSystemStatsQuery{} |
||||
if err := bus.Dispatch(&statsQuery); err != nil { |
||||
metricsLogger.Error("Failed to get system stats", "error", err) |
||||
return |
||||
} |
||||
|
||||
metrics["stats.dashboards.count"] = statsQuery.Result.Dashboards |
||||
metrics["stats.users.count"] = statsQuery.Result.Users |
||||
metrics["stats.orgs.count"] = statsQuery.Result.Orgs |
||||
metrics["stats.playlist.count"] = statsQuery.Result.Playlists |
||||
metrics["stats.plugins.apps.count"] = len(plugins.Apps) |
||||
metrics["stats.plugins.panels.count"] = len(plugins.Panels) |
||||
metrics["stats.plugins.datasources.count"] = len(plugins.DataSources) |
||||
metrics["stats.alerts.count"] = statsQuery.Result.Alerts |
||||
metrics["stats.active_users.count"] = statsQuery.Result.ActiveUsers |
||||
metrics["stats.datasources.count"] = statsQuery.Result.Datasources |
||||
|
||||
dsStats := m.GetDataSourceStatsQuery{} |
||||
if err := bus.Dispatch(&dsStats); err != nil { |
||||
metricsLogger.Error("Failed to get datasource stats", "error", err) |
||||
return |
||||
} |
||||
|
||||
// send counters for each data source
|
||||
// but ignore any custom data sources
|
||||
// as sending that name could be sensitive information
|
||||
dsOtherCount := 0 |
||||
for _, dsStat := range dsStats.Result { |
||||
if m.IsKnownDataSourcePlugin(dsStat.Type) { |
||||
metrics["stats.ds."+dsStat.Type+".count"] = dsStat.Count |
||||
} else { |
||||
dsOtherCount += dsStat.Count |
||||
} |
||||
} |
||||
metrics["stats.ds.other.count"] = dsOtherCount |
||||
|
||||
out, _ := json.MarshalIndent(report, "", " ") |
||||
data := bytes.NewBuffer(out) |
||||
|
||||
client := http.Client{Timeout: time.Duration(5 * time.Second)} |
||||
go client.Post("https://stats.grafana.org/grafana-usage-report", "application/json", data) |
||||
} |
||||
@ -1,37 +0,0 @@ |
||||
package metrics |
||||
|
||||
import "sync" |
||||
|
||||
type Registry interface { |
||||
GetSnapshots() []Metric |
||||
Register(metric Metric) |
||||
} |
||||
|
||||
// The standard implementation of a Registry is a mutex-protected map
|
||||
// of names to metrics.
|
||||
type StandardRegistry struct { |
||||
metrics []Metric |
||||
mutex sync.Mutex |
||||
} |
||||
|
||||
// Create a new registry.
|
||||
func NewRegistry() Registry { |
||||
return &StandardRegistry{ |
||||
metrics: make([]Metric, 0), |
||||
} |
||||
} |
||||
|
||||
func (r *StandardRegistry) Register(metric Metric) { |
||||
r.mutex.Lock() |
||||
defer r.mutex.Unlock() |
||||
r.metrics = append(r.metrics, metric) |
||||
} |
||||
|
||||
// Call the given function for each registered metric.
|
||||
func (r *StandardRegistry) GetSnapshots() []Metric { |
||||
metrics := make([]Metric, len(r.metrics)) |
||||
for i, metric := range r.metrics { |
||||
metrics[i] = metric.Snapshot() |
||||
} |
||||
return metrics |
||||
} |
||||
@ -1,607 +0,0 @@ |
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics |
||||
|
||||
import ( |
||||
"math" |
||||
"math/rand" |
||||
"sort" |
||||
"sync" |
||||
"time" |
||||
) |
||||
|
||||
const rescaleThreshold = time.Hour |
||||
|
||||
// Samples maintain a statistically-significant selection of values from
|
||||
// a stream.
|
||||
type Sample interface { |
||||
Clear() |
||||
Count() int64 |
||||
Max() int64 |
||||
Mean() float64 |
||||
Min() int64 |
||||
Percentile(float64) float64 |
||||
Percentiles([]float64) []float64 |
||||
Size() int |
||||
Snapshot() Sample |
||||
StdDev() float64 |
||||
Sum() int64 |
||||
Update(int64) |
||||
Values() []int64 |
||||
Variance() float64 |
||||
} |
||||
|
||||
// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
|
||||
// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
|
||||
// Decay Model for Streaming Systems".
|
||||
//
|
||||
// <http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf>
|
||||
type ExpDecaySample struct { |
||||
alpha float64 |
||||
count int64 |
||||
mutex sync.Mutex |
||||
reservoirSize int |
||||
t0, t1 time.Time |
||||
values *expDecaySampleHeap |
||||
} |
||||
|
||||
// NewExpDecaySample constructs a new exponentially-decaying sample with the
|
||||
// given reservoir size and alpha.
|
||||
func NewExpDecaySample(reservoirSize int, alpha float64) Sample { |
||||
s := &ExpDecaySample{ |
||||
alpha: alpha, |
||||
reservoirSize: reservoirSize, |
||||
t0: time.Now(), |
||||
values: newExpDecaySampleHeap(reservoirSize), |
||||
} |
||||
s.t1 = s.t0.Add(rescaleThreshold) |
||||
return s |
||||
} |
||||
|
||||
// Clear clears all samples.
|
||||
func (s *ExpDecaySample) Clear() { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
s.count = 0 |
||||
s.t0 = time.Now() |
||||
s.t1 = s.t0.Add(rescaleThreshold) |
||||
s.values.Clear() |
||||
} |
||||
|
||||
// Count returns the number of samples recorded, which may exceed the
|
||||
// reservoir size.
|
||||
func (s *ExpDecaySample) Count() int64 { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
return s.count |
||||
} |
||||
|
||||
// Max returns the maximum value in the sample, which may not be the maximum
|
||||
// value ever to be part of the sample.
|
||||
func (s *ExpDecaySample) Max() int64 { |
||||
return SampleMax(s.Values()) |
||||
} |
||||
|
||||
// Mean returns the mean of the values in the sample.
|
||||
func (s *ExpDecaySample) Mean() float64 { |
||||
return SampleMean(s.Values()) |
||||
} |
||||
|
||||
// Min returns the minimum value in the sample, which may not be the minimum
|
||||
// value ever to be part of the sample.
|
||||
func (s *ExpDecaySample) Min() int64 { |
||||
return SampleMin(s.Values()) |
||||
} |
||||
|
||||
// Percentile returns an arbitrary percentile of values in the sample.
|
||||
func (s *ExpDecaySample) Percentile(p float64) float64 { |
||||
return SamplePercentile(s.Values(), p) |
||||
} |
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of values in the
|
||||
// sample.
|
||||
func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { |
||||
return SamplePercentiles(s.Values(), ps) |
||||
} |
||||
|
||||
// Size returns the size of the sample, which is at most the reservoir size.
|
||||
func (s *ExpDecaySample) Size() int { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
return s.values.Size() |
||||
} |
||||
|
||||
// Snapshot returns a read-only copy of the sample.
|
||||
func (s *ExpDecaySample) Snapshot() Sample { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
vals := s.values.Values() |
||||
values := make([]int64, len(vals)) |
||||
for i, v := range vals { |
||||
values[i] = v.v |
||||
} |
||||
return &SampleSnapshot{ |
||||
count: s.count, |
||||
values: values, |
||||
} |
||||
} |
||||
|
||||
// StdDev returns the standard deviation of the values in the sample.
|
||||
func (s *ExpDecaySample) StdDev() float64 { |
||||
return SampleStdDev(s.Values()) |
||||
} |
||||
|
||||
// Sum returns the sum of the values in the sample.
|
||||
func (s *ExpDecaySample) Sum() int64 { |
||||
return SampleSum(s.Values()) |
||||
} |
||||
|
||||
// Update samples a new value.
|
||||
func (s *ExpDecaySample) Update(v int64) { |
||||
s.update(time.Now(), v) |
||||
} |
||||
|
||||
// Values returns a copy of the values in the sample.
|
||||
func (s *ExpDecaySample) Values() []int64 { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
vals := s.values.Values() |
||||
values := make([]int64, len(vals)) |
||||
for i, v := range vals { |
||||
values[i] = v.v |
||||
} |
||||
return values |
||||
} |
||||
|
||||
// Variance returns the variance of the values in the sample.
|
||||
func (s *ExpDecaySample) Variance() float64 { |
||||
return SampleVariance(s.Values()) |
||||
} |
||||
|
||||
// update samples a new value at a particular timestamp. This is a method all
|
||||
// its own to facilitate testing.
|
||||
func (s *ExpDecaySample) update(t time.Time, v int64) { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
s.count++ |
||||
if s.values.Size() == s.reservoirSize { |
||||
s.values.Pop() |
||||
} |
||||
s.values.Push(expDecaySample{ |
||||
k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), |
||||
v: v, |
||||
}) |
||||
if t.After(s.t1) { |
||||
values := s.values.Values() |
||||
t0 := s.t0 |
||||
s.values.Clear() |
||||
s.t0 = t |
||||
s.t1 = s.t0.Add(rescaleThreshold) |
||||
for _, v := range values { |
||||
v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds()) |
||||
s.values.Push(v) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// NilSample is a no-op Sample.
|
||||
type NilSample struct{} |
||||
|
||||
// Clear is a no-op.
|
||||
func (NilSample) Clear() {} |
||||
|
||||
// Count is a no-op.
|
||||
func (NilSample) Count() int64 { return 0 } |
||||
|
||||
// Max is a no-op.
|
||||
func (NilSample) Max() int64 { return 0 } |
||||
|
||||
// Mean is a no-op.
|
||||
func (NilSample) Mean() float64 { return 0.0 } |
||||
|
||||
// Min is a no-op.
|
||||
func (NilSample) Min() int64 { return 0 } |
||||
|
||||
// Percentile is a no-op.
|
||||
func (NilSample) Percentile(p float64) float64 { return 0.0 } |
||||
|
||||
// Percentiles is a no-op.
|
||||
func (NilSample) Percentiles(ps []float64) []float64 { |
||||
return make([]float64, len(ps)) |
||||
} |
||||
|
||||
// Size is a no-op.
|
||||
func (NilSample) Size() int { return 0 } |
||||
|
||||
// Sample is a no-op.
|
||||
func (NilSample) Snapshot() Sample { return NilSample{} } |
||||
|
||||
// StdDev is a no-op.
|
||||
func (NilSample) StdDev() float64 { return 0.0 } |
||||
|
||||
// Sum is a no-op.
|
||||
func (NilSample) Sum() int64 { return 0 } |
||||
|
||||
// Update is a no-op.
|
||||
func (NilSample) Update(v int64) {} |
||||
|
||||
// Values is a no-op.
|
||||
func (NilSample) Values() []int64 { return []int64{} } |
||||
|
||||
// Variance is a no-op.
|
||||
func (NilSample) Variance() float64 { return 0.0 } |
||||
|
||||
// SampleMax returns the maximum value of the slice of int64.
|
||||
func SampleMax(values []int64) int64 { |
||||
if 0 == len(values) { |
||||
return 0 |
||||
} |
||||
var max int64 = math.MinInt64 |
||||
for _, v := range values { |
||||
if max < v { |
||||
max = v |
||||
} |
||||
} |
||||
return max |
||||
} |
||||
|
||||
// SampleMean returns the mean value of the slice of int64.
|
||||
func SampleMean(values []int64) float64 { |
||||
if 0 == len(values) { |
||||
return 0.0 |
||||
} |
||||
return float64(SampleSum(values)) / float64(len(values)) |
||||
} |
||||
|
||||
// SampleMin returns the minimum value of the slice of int64.
|
||||
func SampleMin(values []int64) int64 { |
||||
if 0 == len(values) { |
||||
return 0 |
||||
} |
||||
var min int64 = math.MaxInt64 |
||||
for _, v := range values { |
||||
if min > v { |
||||
min = v |
||||
} |
||||
} |
||||
return min |
||||
} |
||||
|
||||
// SamplePercentiles returns an arbitrary percentile of the slice of int64.
|
||||
func SamplePercentile(values int64Slice, p float64) float64 { |
||||
return SamplePercentiles(values, []float64{p})[0] |
||||
} |
||||
|
||||
// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
|
||||
// int64.
|
||||
func SamplePercentiles(values int64Slice, ps []float64) []float64 { |
||||
scores := make([]float64, len(ps)) |
||||
size := len(values) |
||||
if size > 0 { |
||||
sort.Sort(values) |
||||
for i, p := range ps { |
||||
pos := p * float64(size+1) |
||||
if pos < 1.0 { |
||||
scores[i] = float64(values[0]) |
||||
} else if pos >= float64(size) { |
||||
scores[i] = float64(values[size-1]) |
||||
} else { |
||||
lower := float64(values[int(pos)-1]) |
||||
upper := float64(values[int(pos)]) |
||||
scores[i] = lower + (pos-math.Floor(pos))*(upper-lower) |
||||
} |
||||
} |
||||
} |
||||
return scores |
||||
} |
||||
|
||||
// SampleSnapshot is a read-only copy of another Sample.
|
||||
type SampleSnapshot struct { |
||||
count int64 |
||||
values []int64 |
||||
} |
||||
|
||||
// Clear panics.
|
||||
func (*SampleSnapshot) Clear() { |
||||
panic("Clear called on a SampleSnapshot") |
||||
} |
||||
|
||||
// Count returns the count of inputs at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Count() int64 { return s.count } |
||||
|
||||
// Max returns the maximal value at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) } |
||||
|
||||
// Mean returns the mean value at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) } |
||||
|
||||
// Min returns the minimal value at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) } |
||||
|
||||
// Percentile returns an arbitrary percentile of values at the time the
|
||||
// snapshot was taken.
|
||||
func (s *SampleSnapshot) Percentile(p float64) float64 { |
||||
return SamplePercentile(s.values, p) |
||||
} |
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of values at the time
|
||||
// the snapshot was taken.
|
||||
func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { |
||||
return SamplePercentiles(s.values, ps) |
||||
} |
||||
|
||||
// Size returns the size of the sample at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Size() int { return len(s.values) } |
||||
|
||||
// Snapshot returns the snapshot.
|
||||
func (s *SampleSnapshot) Snapshot() Sample { return s } |
||||
|
||||
// StdDev returns the standard deviation of values at the time the snapshot was
|
||||
// taken.
|
||||
func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } |
||||
|
||||
// Sum returns the sum of values at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } |
||||
|
||||
// Update panics.
|
||||
func (*SampleSnapshot) Update(int64) { |
||||
panic("Update called on a SampleSnapshot") |
||||
} |
||||
|
||||
// Values returns a copy of the values in the sample.
|
||||
func (s *SampleSnapshot) Values() []int64 { |
||||
values := make([]int64, len(s.values)) |
||||
copy(values, s.values) |
||||
return values |
||||
} |
||||
|
||||
// Variance returns the variance of values at the time the snapshot was taken.
|
||||
func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } |
||||
|
||||
// SampleStdDev returns the standard deviation of the slice of int64.
|
||||
func SampleStdDev(values []int64) float64 { |
||||
return math.Sqrt(SampleVariance(values)) |
||||
} |
||||
|
||||
// SampleSum returns the sum of the slice of int64.
|
||||
func SampleSum(values []int64) int64 { |
||||
var sum int64 |
||||
for _, v := range values { |
||||
sum += v |
||||
} |
||||
return sum |
||||
} |
||||
|
||||
// SampleVariance returns the variance of the slice of int64.
|
||||
func SampleVariance(values []int64) float64 { |
||||
if 0 == len(values) { |
||||
return 0.0 |
||||
} |
||||
m := SampleMean(values) |
||||
var sum float64 |
||||
for _, v := range values { |
||||
d := float64(v) - m |
||||
sum += d * d |
||||
} |
||||
return sum / float64(len(values)) |
||||
} |
||||
|
||||
// A uniform sample using Vitter's Algorithm R.
|
||||
//
|
||||
// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
|
||||
type UniformSample struct { |
||||
count int64 |
||||
mutex sync.Mutex |
||||
reservoirSize int |
||||
values []int64 |
||||
} |
||||
|
||||
// NewUniformSample constructs a new uniform sample with the given reservoir
|
||||
// size.
|
||||
func NewUniformSample(reservoirSize int) Sample { |
||||
return &UniformSample{ |
||||
reservoirSize: reservoirSize, |
||||
values: make([]int64, 0, reservoirSize), |
||||
} |
||||
} |
||||
|
||||
// Clear clears all samples.
|
||||
func (s *UniformSample) Clear() { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
s.count = 0 |
||||
s.values = make([]int64, 0, s.reservoirSize) |
||||
} |
||||
|
||||
// Count returns the number of samples recorded, which may exceed the
|
||||
// reservoir size.
|
||||
func (s *UniformSample) Count() int64 { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
return s.count |
||||
} |
||||
|
||||
// Max returns the maximum value in the sample, which may not be the maximum
|
||||
// value ever to be part of the sample.
|
||||
func (s *UniformSample) Max() int64 { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
return SampleMax(s.values) |
||||
} |
||||
|
||||
// Mean returns the mean of the values in the sample.
|
||||
func (s *UniformSample) Mean() float64 { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
return SampleMean(s.values) |
||||
} |
||||
|
||||
// Min returns the minimum value in the sample, which may not be the minimum
|
||||
// value ever to be part of the sample.
|
||||
func (s *UniformSample) Min() int64 { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
return SampleMin(s.values) |
||||
} |
||||
|
||||
// Percentile returns an arbitrary percentile of values in the sample.
|
||||
func (s *UniformSample) Percentile(p float64) float64 { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
return SamplePercentile(s.values, p) |
||||
} |
||||
|
||||
// Percentiles returns a slice of arbitrary percentiles of values in the
|
||||
// sample.
|
||||
func (s *UniformSample) Percentiles(ps []float64) []float64 { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
return SamplePercentiles(s.values, ps) |
||||
} |
||||
|
||||
// Size returns the size of the sample, which is at most the reservoir size.
|
||||
func (s *UniformSample) Size() int { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
return len(s.values) |
||||
} |
||||
|
||||
// Snapshot returns a read-only copy of the sample.
|
||||
func (s *UniformSample) Snapshot() Sample { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
values := make([]int64, len(s.values)) |
||||
copy(values, s.values) |
||||
return &SampleSnapshot{ |
||||
count: s.count, |
||||
values: values, |
||||
} |
||||
} |
||||
|
||||
// StdDev returns the standard deviation of the values in the sample.
|
||||
func (s *UniformSample) StdDev() float64 { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
return SampleStdDev(s.values) |
||||
} |
||||
|
||||
// Sum returns the sum of the values in the sample.
|
||||
func (s *UniformSample) Sum() int64 { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
return SampleSum(s.values) |
||||
} |
||||
|
||||
// Update samples a new value.
|
||||
func (s *UniformSample) Update(v int64) { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
s.count++ |
||||
if len(s.values) < s.reservoirSize { |
||||
s.values = append(s.values, v) |
||||
} else { |
||||
r := rand.Int63n(s.count) |
||||
if r < int64(len(s.values)) { |
||||
s.values[int(r)] = v |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Values returns a copy of the values in the sample.
|
||||
func (s *UniformSample) Values() []int64 { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
values := make([]int64, len(s.values)) |
||||
copy(values, s.values) |
||||
return values |
||||
} |
||||
|
||||
// Variance returns the variance of the values in the sample.
|
||||
func (s *UniformSample) Variance() float64 { |
||||
s.mutex.Lock() |
||||
defer s.mutex.Unlock() |
||||
return SampleVariance(s.values) |
||||
} |
||||
|
||||
// expDecaySample represents an individual sample in a heap.
|
||||
type expDecaySample struct { |
||||
k float64 |
||||
v int64 |
||||
} |
||||
|
||||
func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap { |
||||
return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)} |
||||
} |
||||
|
||||
// expDecaySampleHeap is a min-heap of expDecaySamples.
|
||||
// The internal implementation is copied from the standard library's container/heap
|
||||
type expDecaySampleHeap struct { |
||||
s []expDecaySample |
||||
} |
||||
|
||||
func (h *expDecaySampleHeap) Clear() { |
||||
h.s = h.s[:0] |
||||
} |
||||
|
||||
func (h *expDecaySampleHeap) Push(s expDecaySample) { |
||||
n := len(h.s) |
||||
h.s = h.s[0 : n+1] |
||||
h.s[n] = s |
||||
h.up(n) |
||||
} |
||||
|
||||
func (h *expDecaySampleHeap) Pop() expDecaySample { |
||||
n := len(h.s) - 1 |
||||
h.s[0], h.s[n] = h.s[n], h.s[0] |
||||
h.down(0, n) |
||||
|
||||
n = len(h.s) |
||||
s := h.s[n-1] |
||||
h.s = h.s[0 : n-1] |
||||
return s |
||||
} |
||||
|
||||
func (h *expDecaySampleHeap) Size() int { |
||||
return len(h.s) |
||||
} |
||||
|
||||
func (h *expDecaySampleHeap) Values() []expDecaySample { |
||||
return h.s |
||||
} |
||||
|
||||
func (h *expDecaySampleHeap) up(j int) { |
||||
for { |
||||
i := (j - 1) / 2 // parent
|
||||
if i == j || !(h.s[j].k < h.s[i].k) { |
||||
break |
||||
} |
||||
h.s[i], h.s[j] = h.s[j], h.s[i] |
||||
j = i |
||||
} |
||||
} |
||||
|
||||
func (h *expDecaySampleHeap) down(i, n int) { |
||||
for { |
||||
j1 := 2*i + 1 |
||||
if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
|
||||
break |
||||
} |
||||
j := j1 // left child
|
||||
if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) { |
||||
j = j2 // = 2*i + 2 // right child
|
||||
} |
||||
if !(h.s[j].k < h.s[i].k) { |
||||
break |
||||
} |
||||
h.s[i], h.s[j] = h.s[j], h.s[i] |
||||
i = j |
||||
} |
||||
} |
||||
|
||||
type int64Slice []int64 |
||||
|
||||
func (p int64Slice) Len() int { return len(p) } |
||||
func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } |
||||
func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } |
||||
@ -1,367 +0,0 @@ |
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics |
||||
|
||||
import ( |
||||
"math/rand" |
||||
"runtime" |
||||
"testing" |
||||
"time" |
||||
) |
||||
|
||||
// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
|
||||
// expensive computations like Variance, the cost of copying the Sample, as
|
||||
// approximated by a make and copy, is much greater than the cost of the
|
||||
// computation for small samples and only slightly less for large samples.
|
||||
func BenchmarkCompute1000(b *testing.B) { |
||||
s := make([]int64, 1000) |
||||
for i := 0; i < len(s); i++ { |
||||
s[i] = int64(i) |
||||
} |
||||
b.ResetTimer() |
||||
for i := 0; i < b.N; i++ { |
||||
SampleVariance(s) |
||||
} |
||||
} |
||||
func BenchmarkCompute1000000(b *testing.B) { |
||||
s := make([]int64, 1000000) |
||||
for i := 0; i < len(s); i++ { |
||||
s[i] = int64(i) |
||||
} |
||||
b.ResetTimer() |
||||
for i := 0; i < b.N; i++ { |
||||
SampleVariance(s) |
||||
} |
||||
} |
||||
func BenchmarkCopy1000(b *testing.B) { |
||||
s := make([]int64, 1000) |
||||
for i := 0; i < len(s); i++ { |
||||
s[i] = int64(i) |
||||
} |
||||
b.ResetTimer() |
||||
for i := 0; i < b.N; i++ { |
||||
sCopy := make([]int64, len(s)) |
||||
copy(sCopy, s) |
||||
} |
||||
} |
||||
func BenchmarkCopy1000000(b *testing.B) { |
||||
s := make([]int64, 1000000) |
||||
for i := 0; i < len(s); i++ { |
||||
s[i] = int64(i) |
||||
} |
||||
b.ResetTimer() |
||||
for i := 0; i < b.N; i++ { |
||||
sCopy := make([]int64, len(s)) |
||||
copy(sCopy, s) |
||||
} |
||||
} |
||||
|
||||
func BenchmarkExpDecaySample257(b *testing.B) { |
||||
benchmarkSample(b, NewExpDecaySample(257, 0.015)) |
||||
} |
||||
|
||||
func BenchmarkExpDecaySample514(b *testing.B) { |
||||
benchmarkSample(b, NewExpDecaySample(514, 0.015)) |
||||
} |
||||
|
||||
func BenchmarkExpDecaySample1028(b *testing.B) { |
||||
benchmarkSample(b, NewExpDecaySample(1028, 0.015)) |
||||
} |
||||
|
||||
func BenchmarkUniformSample257(b *testing.B) { |
||||
benchmarkSample(b, NewUniformSample(257)) |
||||
} |
||||
|
||||
func BenchmarkUniformSample514(b *testing.B) { |
||||
benchmarkSample(b, NewUniformSample(514)) |
||||
} |
||||
|
||||
func BenchmarkUniformSample1028(b *testing.B) { |
||||
benchmarkSample(b, NewUniformSample(1028)) |
||||
} |
||||
|
||||
func TestExpDecaySample10(t *testing.T) { |
||||
rand.Seed(1) |
||||
s := NewExpDecaySample(100, 0.99) |
||||
for i := 0; i < 10; i++ { |
||||
s.Update(int64(i)) |
||||
} |
||||
if size := s.Count(); 10 != size { |
||||
t.Errorf("s.Count(): 10 != %v\n", size) |
||||
} |
||||
if size := s.Size(); 10 != size { |
||||
t.Errorf("s.Size(): 10 != %v\n", size) |
||||
} |
||||
if l := len(s.Values()); 10 != l { |
||||
t.Errorf("len(s.Values()): 10 != %v\n", l) |
||||
} |
||||
for _, v := range s.Values() { |
||||
if v > 10 || v < 0 { |
||||
t.Errorf("out of range [0, 10): %v\n", v) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func TestExpDecaySample100(t *testing.T) { |
||||
rand.Seed(1) |
||||
s := NewExpDecaySample(1000, 0.01) |
||||
for i := 0; i < 100; i++ { |
||||
s.Update(int64(i)) |
||||
} |
||||
if size := s.Count(); 100 != size { |
||||
t.Errorf("s.Count(): 100 != %v\n", size) |
||||
} |
||||
if size := s.Size(); 100 != size { |
||||
t.Errorf("s.Size(): 100 != %v\n", size) |
||||
} |
||||
if l := len(s.Values()); 100 != l { |
||||
t.Errorf("len(s.Values()): 100 != %v\n", l) |
||||
} |
||||
for _, v := range s.Values() { |
||||
if v > 100 || v < 0 { |
||||
t.Errorf("out of range [0, 100): %v\n", v) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func TestExpDecaySample1000(t *testing.T) { |
||||
rand.Seed(1) |
||||
s := NewExpDecaySample(100, 0.99) |
||||
for i := 0; i < 1000; i++ { |
||||
s.Update(int64(i)) |
||||
} |
||||
if size := s.Count(); 1000 != size { |
||||
t.Errorf("s.Count(): 1000 != %v\n", size) |
||||
} |
||||
if size := s.Size(); 100 != size { |
||||
t.Errorf("s.Size(): 100 != %v\n", size) |
||||
} |
||||
if l := len(s.Values()); 100 != l { |
||||
t.Errorf("len(s.Values()): 100 != %v\n", l) |
||||
} |
||||
for _, v := range s.Values() { |
||||
if v > 1000 || v < 0 { |
||||
t.Errorf("out of range [0, 1000): %v\n", v) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// This test makes sure that the sample's priority is not amplified by using
|
||||
// nanosecond duration since start rather than second duration since start.
|
||||
// The priority becomes +Inf quickly after starting if this is done,
|
||||
// effectively freezing the set of samples until a rescale step happens.
|
||||
func TestExpDecaySampleNanosecondRegression(t *testing.T) { |
||||
rand.Seed(1) |
||||
s := NewExpDecaySample(100, 0.99) |
||||
for i := 0; i < 100; i++ { |
||||
s.Update(10) |
||||
} |
||||
time.Sleep(1 * time.Millisecond) |
||||
for i := 0; i < 100; i++ { |
||||
s.Update(20) |
||||
} |
||||
v := s.Values() |
||||
avg := float64(0) |
||||
for i := 0; i < len(v); i++ { |
||||
avg += float64(v[i]) |
||||
} |
||||
avg /= float64(len(v)) |
||||
if avg > 16 || avg < 14 { |
||||
t.Errorf("out of range [14, 16]: %v\n", avg) |
||||
} |
||||
} |
||||
|
||||
func TestExpDecaySampleRescale(t *testing.T) { |
||||
s := NewExpDecaySample(2, 0.001).(*ExpDecaySample) |
||||
s.update(time.Now(), 1) |
||||
s.update(time.Now().Add(time.Hour+time.Microsecond), 1) |
||||
for _, v := range s.values.Values() { |
||||
if v.k == 0.0 { |
||||
t.Fatal("v.k == 0.0") |
||||
} |
||||
} |
||||
} |
||||
|
||||
func TestExpDecaySampleSnapshot(t *testing.T) { |
||||
now := time.Now() |
||||
rand.Seed(1) |
||||
s := NewExpDecaySample(100, 0.99) |
||||
for i := 1; i <= 10000; i++ { |
||||
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i)) |
||||
} |
||||
snapshot := s.Snapshot() |
||||
s.Update(1) |
||||
testExpDecaySampleStatistics(t, snapshot) |
||||
} |
||||
|
||||
func TestExpDecaySampleStatistics(t *testing.T) { |
||||
now := time.Now() |
||||
rand.Seed(1) |
||||
s := NewExpDecaySample(100, 0.99) |
||||
for i := 1; i <= 10000; i++ { |
||||
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i)) |
||||
} |
||||
testExpDecaySampleStatistics(t, s) |
||||
} |
||||
|
||||
func TestUniformSample(t *testing.T) { |
||||
rand.Seed(1) |
||||
s := NewUniformSample(100) |
||||
for i := 0; i < 1000; i++ { |
||||
s.Update(int64(i)) |
||||
} |
||||
if size := s.Count(); 1000 != size { |
||||
t.Errorf("s.Count(): 1000 != %v\n", size) |
||||
} |
||||
if size := s.Size(); 100 != size { |
||||
t.Errorf("s.Size(): 100 != %v\n", size) |
||||
} |
||||
if l := len(s.Values()); 100 != l { |
||||
t.Errorf("len(s.Values()): 100 != %v\n", l) |
||||
} |
||||
for _, v := range s.Values() { |
||||
if v > 1000 || v < 0 { |
||||
t.Errorf("out of range [0, 100): %v\n", v) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func TestUniformSampleIncludesTail(t *testing.T) { |
||||
rand.Seed(1) |
||||
s := NewUniformSample(100) |
||||
max := 100 |
||||
for i := 0; i < max; i++ { |
||||
s.Update(int64(i)) |
||||
} |
||||
v := s.Values() |
||||
sum := 0 |
||||
exp := (max - 1) * max / 2 |
||||
for i := 0; i < len(v); i++ { |
||||
sum += int(v[i]) |
||||
} |
||||
if exp != sum { |
||||
t.Errorf("sum: %v != %v\n", exp, sum) |
||||
} |
||||
} |
||||
|
||||
func TestUniformSampleSnapshot(t *testing.T) { |
||||
s := NewUniformSample(100) |
||||
for i := 1; i <= 10000; i++ { |
||||
s.Update(int64(i)) |
||||
} |
||||
snapshot := s.Snapshot() |
||||
s.Update(1) |
||||
testUniformSampleStatistics(t, snapshot) |
||||
} |
||||
|
||||
func TestUniformSampleStatistics(t *testing.T) { |
||||
rand.Seed(1) |
||||
s := NewUniformSample(100) |
||||
for i := 1; i <= 10000; i++ { |
||||
s.Update(int64(i)) |
||||
} |
||||
testUniformSampleStatistics(t, s) |
||||
} |
||||
|
||||
// benchmarkSample measures the cost of Sample.Update and additionally reports
// the GC pause time attributable to the benchmark loop, amortized per op.
func benchmarkSample(b *testing.B, s Sample) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	// Baseline cumulative GC pause total before the timed loop.
	pauseTotalNs := memStats.PauseTotalNs
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Update(1)
	}
	b.StopTimer()
	// Force a collection so pauses caused by the loop's garbage are included
	// in the delta below.
	runtime.GC()
	runtime.ReadMemStats(&memStats)
	b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}
||||
|
||||
func testExpDecaySampleStatistics(t *testing.T, s Sample) { |
||||
if count := s.Count(); 10000 != count { |
||||
t.Errorf("s.Count(): 10000 != %v\n", count) |
||||
} |
||||
if min := s.Min(); 107 != min { |
||||
t.Errorf("s.Min(): 107 != %v\n", min) |
||||
} |
||||
if max := s.Max(); 10000 != max { |
||||
t.Errorf("s.Max(): 10000 != %v\n", max) |
||||
} |
||||
if mean := s.Mean(); 4965.98 != mean { |
||||
t.Errorf("s.Mean(): 4965.98 != %v\n", mean) |
||||
} |
||||
if stdDev := s.StdDev(); 2959.825156930727 != stdDev { |
||||
t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev) |
||||
} |
||||
ps := s.Percentiles([]float64{0.5, 0.75, 0.99}) |
||||
if 4615 != ps[0] { |
||||
t.Errorf("median: 4615 != %v\n", ps[0]) |
||||
} |
||||
if 7672 != ps[1] { |
||||
t.Errorf("75th percentile: 7672 != %v\n", ps[1]) |
||||
} |
||||
if 9998.99 != ps[2] { |
||||
t.Errorf("99th percentile: 9998.99 != %v\n", ps[2]) |
||||
} |
||||
} |
||||
|
||||
func testUniformSampleStatistics(t *testing.T, s Sample) { |
||||
if count := s.Count(); 10000 != count { |
||||
t.Errorf("s.Count(): 10000 != %v\n", count) |
||||
} |
||||
if min := s.Min(); 37 != min { |
||||
t.Errorf("s.Min(): 37 != %v\n", min) |
||||
} |
||||
if max := s.Max(); 9989 != max { |
||||
t.Errorf("s.Max(): 9989 != %v\n", max) |
||||
} |
||||
if mean := s.Mean(); 4748.14 != mean { |
||||
t.Errorf("s.Mean(): 4748.14 != %v\n", mean) |
||||
} |
||||
if stdDev := s.StdDev(); 2826.684117548333 != stdDev { |
||||
t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev) |
||||
} |
||||
ps := s.Percentiles([]float64{0.5, 0.75, 0.99}) |
||||
if 4599 != ps[0] { |
||||
t.Errorf("median: 4599 != %v\n", ps[0]) |
||||
} |
||||
if 7380.5 != ps[1] { |
||||
t.Errorf("75th percentile: 7380.5 != %v\n", ps[1]) |
||||
} |
||||
if 9986.429999999998 != ps[2] { |
||||
t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2]) |
||||
} |
||||
} |
||||
|
||||
// TestUniformSampleConcurrentUpdateCount would expose data race problems with
|
||||
// concurrent Update and Count calls on Sample when test is called with -race
|
||||
// argument
|
||||
func TestUniformSampleConcurrentUpdateCount(t *testing.T) { |
||||
if testing.Short() { |
||||
t.Skip("skipping in short mode") |
||||
} |
||||
s := NewUniformSample(100) |
||||
for i := 0; i < 100; i++ { |
||||
s.Update(int64(i)) |
||||
} |
||||
quit := make(chan struct{}) |
||||
go func() { |
||||
t := time.NewTicker(10 * time.Millisecond) |
||||
for { |
||||
select { |
||||
case <-t.C: |
||||
s.Update(rand.Int63()) |
||||
case <-quit: |
||||
t.Stop() |
||||
return |
||||
} |
||||
} |
||||
}() |
||||
for i := 0; i < 1000; i++ { |
||||
s.Count() |
||||
time.Sleep(5 * time.Millisecond) |
||||
} |
||||
quit <- struct{}{} |
||||
} |
||||
@ -1,310 +0,0 @@ |
||||
// includes code from
|
||||
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
|
||||
// Copyright 2012 Richard Crowley. All rights reserved.
|
||||
|
||||
package metrics |
||||
|
||||
import ( |
||||
"sync" |
||||
"time" |
||||
) |
||||
|
||||
// Timer captures the duration and rate of events. StandardTimer implements it
// by combining a Histogram (duration distribution) with a Meter (event rate).
type Timer interface {
	Metric

	Count() int64
	Max() int64
	Mean() float64
	Min() int64
	Percentile(float64) float64
	Percentiles([]float64) []float64
	Rate1() float64
	Rate5() float64
	Rate15() float64
	RateMean() float64
	StdDev() float64
	Sum() int64
	// Time runs the given function and records its duration.
	Time(func())
	// Update records one event of the given duration.
	Update(time.Duration)
	// UpdateSince records one event that started at the given time.
	UpdateSince(time.Time)
	Variance() float64
}
||||
|
||||
// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
|
||||
func NewCustomTimer(meta *MetricMeta, h Histogram, m Meter) Timer { |
||||
if UseNilMetrics { |
||||
return NilTimer{} |
||||
} |
||||
return &StandardTimer{ |
||||
MetricMeta: meta, |
||||
histogram: h, |
||||
meter: m, |
||||
} |
||||
} |
||||
|
||||
// NewTimer constructs a new StandardTimer using an exponentially-decaying
|
||||
// sample with the same reservoir size and alpha as UNIX load averages.
|
||||
func NewTimer(meta *MetricMeta) Timer { |
||||
if UseNilMetrics { |
||||
return NilTimer{} |
||||
} |
||||
return &StandardTimer{ |
||||
MetricMeta: meta, |
||||
histogram: NewHistogram(meta, NewExpDecaySample(1028, 0.015)), |
||||
meter: NewMeter(meta), |
||||
} |
||||
} |
||||
|
||||
// RegTimer constructs a Timer from the given name and tag strings and
// registers it with the global MetricStats registry before returning it.
func RegTimer(name string, tagStrings ...string) Timer {
	tr := NewTimer(NewMetricMeta(name, tagStrings))
	MetricStats.Register(tr)
	return tr
}
||||
|
||||
// NilTimer is a no-op Timer. The constructors return it when UseNilMetrics
// is set, so instrumented code runs with metrics effectively disabled.
// The h and m fields are unused.
type NilTimer struct {
	*MetricMeta
	h Histogram
	m Meter
}

// Count is a no-op.
func (NilTimer) Count() int64 { return 0 }

// Max is a no-op.
func (NilTimer) Max() int64 { return 0 }

// Mean is a no-op.
func (NilTimer) Mean() float64 { return 0.0 }

// Min is a no-op.
func (NilTimer) Min() int64 { return 0 }

// Percentile is a no-op.
func (NilTimer) Percentile(p float64) float64 { return 0.0 }

// Percentiles is a no-op; it returns a slice of zeros of the requested length.
func (NilTimer) Percentiles(ps []float64) []float64 {
	return make([]float64, len(ps))
}

// Rate1 is a no-op.
func (NilTimer) Rate1() float64 { return 0.0 }

// Rate5 is a no-op.
func (NilTimer) Rate5() float64 { return 0.0 }

// Rate15 is a no-op.
func (NilTimer) Rate15() float64 { return 0.0 }

// RateMean is a no-op.
func (NilTimer) RateMean() float64 { return 0.0 }

// Snapshot is a no-op; a NilTimer is its own snapshot.
func (n NilTimer) Snapshot() Metric { return n }

// StdDev is a no-op.
func (NilTimer) StdDev() float64 { return 0.0 }

// Sum is a no-op.
func (NilTimer) Sum() int64 { return 0 }

// Time is a no-op. Note that the supplied function is NOT invoked.
func (NilTimer) Time(func()) {}

// Update is a no-op.
func (NilTimer) Update(time.Duration) {}

// UpdateSince is a no-op.
func (NilTimer) UpdateSince(time.Time) {}

// Variance is a no-op.
func (NilTimer) Variance() float64 { return 0.0 }
||||
|
||||
// StandardTimer is the standard implementation of a Timer and uses a Histogram
// and Meter.
type StandardTimer struct {
	*MetricMeta
	histogram Histogram  // duration distribution (Min/Max/Mean/percentiles...)
	meter     Meter      // event rates (Rate1/Rate5/Rate15/RateMean)
	mutex     sync.Mutex // serializes Update, UpdateSince and Snapshot
}
||||
|
||||
// Count returns the number of events recorded.
func (t *StandardTimer) Count() int64 {
	return t.histogram.Count()
}

// Max returns the maximum value in the sample.
func (t *StandardTimer) Max() int64 {
	return t.histogram.Max()
}

// Mean returns the mean of the values in the sample.
func (t *StandardTimer) Mean() float64 {
	return t.histogram.Mean()
}

// Min returns the minimum value in the sample.
func (t *StandardTimer) Min() int64 {
	return t.histogram.Min()
}

// Percentile returns an arbitrary percentile of the values in the sample.
func (t *StandardTimer) Percentile(p float64) float64 {
	return t.histogram.Percentile(p)
}

// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (t *StandardTimer) Percentiles(ps []float64) []float64 {
	return t.histogram.Percentiles(ps)
}

// Rate1 returns the one-minute moving average rate of events per second.
func (t *StandardTimer) Rate1() float64 {
	return t.meter.Rate1()
}

// Rate5 returns the five-minute moving average rate of events per second.
func (t *StandardTimer) Rate5() float64 {
	return t.meter.Rate5()
}

// Rate15 returns the fifteen-minute moving average rate of events per second.
func (t *StandardTimer) Rate15() float64 {
	return t.meter.Rate15()
}

// RateMean returns the meter's mean rate of events per second.
func (t *StandardTimer) RateMean() float64 {
	return t.meter.RateMean()
}

// Snapshot returns a read-only copy of the timer. Holding the mutex ensures
// the histogram and meter snapshots are taken at a consistent point relative
// to Update/UpdateSince, which take the same lock.
func (t *StandardTimer) Snapshot() Metric {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	return &TimerSnapshot{
		MetricMeta: t.MetricMeta,
		histogram:  t.histogram.Snapshot().(*HistogramSnapshot),
		meter:      t.meter.Snapshot().(*MeterSnapshot),
	}
}

// StdDev returns the standard deviation of the values in the sample.
func (t *StandardTimer) StdDev() float64 {
	return t.histogram.StdDev()
}

// Sum returns the sum in the sample.
func (t *StandardTimer) Sum() int64 {
	return t.histogram.Sum()
}
||||
|
||||
// Record the duration of the execution of the given function.
|
||||
func (t *StandardTimer) Time(f func()) { |
||||
ts := time.Now() |
||||
f() |
||||
t.Update(time.Since(ts)) |
||||
} |
||||
|
||||
// Update records the duration of an event. The duration is stored in the
// histogram as its raw int64 value, i.e. in nanoseconds.
func (t *StandardTimer) Update(d time.Duration) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.histogram.Update(int64(d))
	t.meter.Mark(1)
}

// UpdateSince records the duration of an event that started at ts and ends
// now.
//
// NOTE(review): this stores the elapsed time in *milliseconds*, while Update
// stores raw durations (nanoseconds). Mixing both on the same timer would
// record inconsistent units — confirm callers use only one of the two.
func (t *StandardTimer) UpdateSince(ts time.Time) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	sinceMs := time.Since(ts) / time.Millisecond
	t.histogram.Update(int64(sinceMs))
	t.meter.Mark(1)
}

// Variance returns the variance of the values in the sample.
func (t *StandardTimer) Variance() float64 {
	return t.histogram.Variance()
}
||||
|
||||
// TimerSnapshot is a read-only copy of another Timer. All accessors report
// the state at the time the snapshot was taken; the mutating methods panic.
type TimerSnapshot struct {
	*MetricMeta
	histogram *HistogramSnapshot
	meter     *MeterSnapshot
}

// Count returns the number of events recorded at the time the snapshot was
// taken.
func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }

// Max returns the maximum value at the time the snapshot was taken.
func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }

// Mean returns the mean value at the time the snapshot was taken.
func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }

// Min returns the minimum value at the time the snapshot was taken.
func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }

// Percentile returns an arbitrary percentile of sampled values at the time the
// snapshot was taken.
func (t *TimerSnapshot) Percentile(p float64) float64 {
	return t.histogram.Percentile(p)
}

// Percentiles returns a slice of arbitrary percentiles of sampled values at
// the time the snapshot was taken.
func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
	return t.histogram.Percentiles(ps)
}

// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }

// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }

// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }

// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }

// Snapshot returns the snapshot; a snapshot is its own snapshot.
func (t *TimerSnapshot) Snapshot() Metric { return t }

// StdDev returns the standard deviation of the values at the time the snapshot
// was taken.
func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }

// Sum returns the sum at the time the snapshot was taken.
func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }

// Time panics: snapshots are read-only.
func (*TimerSnapshot) Time(func()) {
	panic("Time called on a TimerSnapshot")
}

// Update panics: snapshots are read-only.
func (*TimerSnapshot) Update(time.Duration) {
	panic("Update called on a TimerSnapshot")
}

// UpdateSince panics: snapshots are read-only.
func (*TimerSnapshot) UpdateSince(time.Time) {
	panic("UpdateSince called on a TimerSnapshot")
}

// Variance returns the variance of the values at the time the snapshot was
// taken.
func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
||||
@ -0,0 +1,106 @@ |
||||
// jshint ignore: start
|
||||
// jscs: disable
|
||||
|
||||
// Highlighting rules for the Ace SQL mode: tokenizes line/block comments,
// three string styles, numbers, keywords/built-ins (via a keyword mapper)
// and operators.
ace.define("ace/mode/sql_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";

var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;

var SqlHighlightRules = function() {

    // Word lists consumed by createKeywordMapper below. Matching is
    // case-insensitive (the mapper's third argument is true).
    var keywords = (
        "select|insert|update|delete|from|where|and|or|group|by|order|limit|offset|having|as|case|" +
        "when|else|end|type|left|right|join|on|outer|desc|asc|union|create|table|primary|key|if|" +
        "foreign|not|references|default|null|inner|cross|natural|database|drop|grant"
    );

    var builtinConstants = (
        "true|false"
    );

    var builtinFunctions = (
        "avg|count|first|last|max|min|sum|ucase|lcase|mid|len|round|rank|now|format|" +
        "coalesce|ifnull|isnull|nvl"
    );

    var dataTypes = (
        "int|numeric|decimal|date|varchar|char|bigint|float|double|bit|binary|text|set|timestamp|" +
        "money|real|number|integer"
    );

    // Words not in any list fall through to the "identifier" token.
    var keywordMapper = this.createKeywordMapper({
        "support.function": builtinFunctions,
        "keyword": keywords,
        "constant.language": builtinConstants,
        "storage.type": dataTypes
    }, "identifier", true);

    this.$rules = {
        "start" : [ {
            token : "comment",
            regex : "--.*$"
        }, {
            token : "comment",
            start : "/\\*",
            end : "\\*/"
        }, {
            token : "string",           // " string
            regex : '".*?"'
        }, {
            token : "string",           // ' string
            regex : "'.*?'"
        }, {
            token : "string",           // ` string (apache drill)
            regex : "`.*?`"
        }, {
            token : "constant.numeric", // float
            regex : "[+-]?\\d+(?:(?:\\.\\d*)?(?:[eE][+-]?\\d+)?)?\\b"
        }, {
            token : keywordMapper,
            regex : "[a-zA-Z_$][a-zA-Z0-9_$]*\\b"
        }, {
            token : "keyword.operator",
            regex : "\\+|\\-|\\/|\\/\\/|%|<@>|@>|<@|&|\\^|~|<|>|<=|=>|==|!=|<>|="
        }, {
            token : "paren.lparen",
            regex : "[\\(]"
        }, {
            token : "paren.rparen",
            regex : "[\\)]"
        }, {
            token : "text",
            regex : "\\s+"
        } ]
    };
    this.normalizeRules();
};

oop.inherits(SqlHighlightRules, TextHighlightRules);

exports.SqlHighlightRules = SqlHighlightRules;
});
|
||||
// Ace mode definition for SQL: the plain text mode extended with the SQL
// highlighter above and "--" line comments.
ace.define("ace/mode/sql",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/sql_highlight_rules"], function(require, exports, module) {
"use strict";

var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var SqlHighlightRules = require("./sql_highlight_rules").SqlHighlightRules;

var Mode = function() {
    this.HighlightRules = SqlHighlightRules;
    this.$behaviour = this.$defaultBehaviour;
};
oop.inherits(Mode, TextMode);

(function() {

    // Used by Ace's toggle-comment command.
    this.lineCommentStart = "--";

    this.$id = "ace/mode/sql";
}).call(Mode.prototype);

exports.Mode = Mode;

});
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue