Update for cortex changes.

Signed-off-by: Tom Wilkie <tom.wilkie@gmail.com>
pull/27/head
Tom Wilkie 7 years ago
parent 7c1355cedf
commit 0ffb432f30
  1. 4
      .gitignore
  2. 8
      Makefile
  3. 12
      cmd/distributor/main.go
  4. 11
      cmd/ingester/main.go
  5. 9
      cmd/promtail/main.go
  6. 11
      cmd/querier/main.go
  7. 21
      pkg/distributor/distributor.go
  8. 2
      pkg/distributor/http.go
  9. 1
      pkg/ingester/client/client.go
  10. 10
      pkg/ingester/ingester.go
  11. 2
      pkg/logproto/dep.go
  12. 2
      pkg/logproto/logproto.proto
  13. 9
      pkg/querier/querier.go

4
.gitignore vendored

@ -1,7 +1,7 @@
.uptodate
!vendor/**/*.pb.go
vendor/github.com/weaveworks/cortex/pkg/ingester/client/cortex.pb.go
vendor/github.com/weaveworks/cortex/pkg/ring/ring.pb.go
vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go
vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
.pkg
.cache
cmd/distributor/distributor

@ -17,7 +17,7 @@ UPTODATE := .uptodate
touch $@
# We don't want find to scan inside a bunch of directories, to accelerate the
# 'make: Entering directory '/go/src/github.com/weaveworks/cortex' phase.
# 'make: Entering directory '/go/src/github.com/grafana/tempo' phase.
DONT_FIND := -name tools -prune -o -name vendor -prune -o -name .git -prune -o -name .cache -prune -o -name .pkg -prune -o
# Get a list of directories containing Dockerfiles
@ -32,8 +32,8 @@ images:
# Generating proto code is automated.
PROTO_DEFS := $(shell find . $(DONT_FIND) -type f -name '*.proto' -print)
PROTO_GOS := $(patsubst %.proto,%.pb.go,$(PROTO_DEFS)) \
vendor/github.com/weaveworks/cortex/pkg/ring/ring.pb.go \
vendor/github.com/weaveworks/cortex/pkg/ingester/client/cortex.pb.go
vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go \
vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go
# Generating yacc code is automated.
YACC_DEFS := $(shell find . $(DONT_FIND) -type f -name *.y -print)
@ -52,7 +52,7 @@ $(foreach exe, $(EXES), $(eval $(call dep_exe, $(exe))))
# Manually declared dependencies and what goes into each exe
pkg/logproto/logproto.pb.go: pkg/logproto/logproto.proto
vendor/github.com/weaveworks/cortex/pkg/ring/ring.pb.go: vendor/github.com/weaveworks/cortex/pkg/ring/ring.proto
vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go: vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto
pkg/parser/labels.go: pkg/parser/labels.y
pkg/parser/matchers.go: pkg/parser/matchers.y
all: $(UPTODATE_FILES)

@ -7,14 +7,12 @@ import (
"github.com/opentracing-contrib/go-stdlib/nethttp"
opentracing "github.com/opentracing/opentracing-go"
"github.com/prometheus/common/promlog"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/common/logging"
"github.com/weaveworks/common/middleware"
"github.com/weaveworks/common/server"
"github.com/weaveworks/cortex/pkg/ring"
"github.com/weaveworks/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/util"
"google.golang.org/grpc"
"github.com/grafana/tempo/pkg/distributor"
@ -32,14 +30,10 @@ func main() {
}
ringConfig ring.Config
distributorConfig distributor.Config
logLevel = promlog.AllowedLevel{}
)
flagext.Var(flagset, &logLevel, "log.level", "info", "")
flagext.RegisterConfigs(flagset, &serverConfig, &ringConfig, &distributorConfig)
flagset.Parse(os.Args[1:])
logging.Setup(logLevel.String())
util.InitLogger(logLevel)
util.InitLogger(&serverConfig)
r, err := ring.New(ringConfig)
if err != nil {

@ -5,13 +5,12 @@ import (
"net/http"
"os"
"github.com/prometheus/common/promlog"
"github.com/cortexproject/cortex/pkg/util"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/common/logging"
"github.com/weaveworks/common/middleware"
"github.com/weaveworks/common/server"
"github.com/weaveworks/cortex/pkg/util"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/grafana/tempo/pkg/flagext"
"github.com/grafana/tempo/pkg/ingester"
@ -31,14 +30,11 @@ func main() {
},
}
ingesterConfig ingester.Config
logLevel = promlog.AllowedLevel{}
)
flagext.Var(flagset, &logLevel, "log.level", "info", "")
flagext.RegisterConfigs(flagset, &serverConfig, &ingesterConfig)
flagset.Parse(os.Args[1:])
logging.Setup(logLevel.String())
util.InitLogger(logLevel)
util.InitLogger(&serverConfig)
ingesterConfig.LifecyclerConfig.ListenPort = &serverConfig.GRPCListenPort
ingester, err := ingester.New(ingesterConfig)
@ -55,6 +51,7 @@ func main() {
logproto.RegisterPusherServer(server.GRPC, ingester)
logproto.RegisterQuerierServer(server.GRPC, ingester)
grpc_health_v1.RegisterHealthServer(server.GRPC, ingester)
server.HTTP.Path("/ready").Handler(http.HandlerFunc(ingester.ReadinessHandler))
server.Run()
}

@ -4,12 +4,10 @@ import (
"flag"
"os"
"github.com/cortexproject/cortex/pkg/util"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
"github.com/weaveworks/common/logging"
"github.com/weaveworks/common/server"
"github.com/weaveworks/cortex/pkg/util"
"github.com/grafana/tempo/pkg/flagext"
"github.com/grafana/tempo/pkg/promtail"
@ -19,17 +17,14 @@ func main() {
var (
flagset = flag.NewFlagSet("", flag.ExitOnError)
configFile = flagset.String("config.file", "promtail.yml", "The config file.")
logLevel = promlog.AllowedLevel{}
serverConfig server.Config
clientConfig promtail.ClientConfig
positionsConfig promtail.PositionsConfig
)
flagext.Var(flagset, &logLevel, "log.level", "info", "")
flagext.RegisterConfigs(flagset, &serverConfig, &clientConfig, &positionsConfig)
flagset.Parse(os.Args[1:])
logging.Setup(logLevel.String())
util.InitLogger(logLevel)
util.InitLogger(&serverConfig)
client, err := promtail.NewClient(clientConfig)
if err != nil {

@ -5,15 +5,13 @@ import (
"net/http"
"os"
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/util"
"github.com/opentracing-contrib/go-stdlib/nethttp"
opentracing "github.com/opentracing/opentracing-go"
"github.com/prometheus/common/promlog"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/common/logging"
"github.com/weaveworks/common/middleware"
"github.com/weaveworks/common/server"
"github.com/weaveworks/cortex/pkg/ring"
"github.com/weaveworks/cortex/pkg/util"
"google.golang.org/grpc"
"github.com/grafana/tempo/pkg/flagext"
@ -34,14 +32,11 @@ func main() {
}
ringConfig ring.Config
querierConfig querier.Config
logLevel = promlog.AllowedLevel{}
)
flagext.Var(flagset, &logLevel, "log.level", "info", "")
flagext.RegisterConfigs(flagset, &serverConfig, &ringConfig, &querierConfig)
flagset.Parse(os.Args[1:])
logging.Setup(logLevel.String())
util.InitLogger(logLevel)
util.InitLogger(&serverConfig)
r, err := ring.New(ringConfig)
if err != nil {

@ -7,12 +7,13 @@ import (
"sync/atomic"
"time"
cortex_client "github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/util"
opentracing "github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/weaveworks/common/instrument"
"github.com/weaveworks/common/user"
cortex_client "github.com/weaveworks/cortex/pkg/ingester/client"
"github.com/weaveworks/cortex/pkg/ring"
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/grafana/tempo/pkg/ingester/client"
@ -75,7 +76,7 @@ func New(cfg Config, ring ring.ReadRing) (*Distributor, error) {
return &Distributor{
cfg: cfg,
ring: ring,
pool: cortex_client.NewPool(cfg.PoolConfig, ring, factory),
pool: cortex_client.NewPool(cfg.PoolConfig, ring, factory, util.Logger),
}, nil
}
@ -122,12 +123,14 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
return nil, err
}
samplesByIngester := map[*ring.IngesterDesc][]*streamTracker{}
samplesByIngester := map[string][]*streamTracker{}
ingesterDescs := map[string]ring.IngesterDesc{}
for i, replicationSet := range replicationSets {
streams[i].minSuccess = len(replicationSet.Ingesters) - replicationSet.MaxErrors
streams[i].maxFailures = replicationSet.MaxErrors
for _, ingester := range replicationSet.Ingesters {
samplesByIngester[ingester] = append(samplesByIngester[ingester], &streams[i])
samplesByIngester[ingester.Addr] = append(samplesByIngester[ingester.Addr], &streams[i])
ingesterDescs[ingester.Addr] = ingester
}
}
@ -137,7 +140,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
err: make(chan error),
}
for ingester, samples := range samplesByIngester {
go func(ingester *ring.IngesterDesc, samples []*streamTracker) {
go func(ingester ring.IngesterDesc, samples []*streamTracker) {
// Use a background context to make sure all ingesters get samples even if we return early
localCtx, cancel := context.WithTimeout(context.Background(), d.cfg.RemoteTimeout)
defer cancel()
@ -146,7 +149,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
localCtx = opentracing.ContextWithSpan(localCtx, sp)
}
d.sendSamples(localCtx, ingester, samples, &pushTracker)
}(ingester, samples)
}(ingesterDescs[ingester], samples)
}
select {
case err := <-pushTracker.err:
@ -157,7 +160,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}
// TODO taken from Cortex, see if we can refactor out a usable interface.
func (d *Distributor) sendSamples(ctx context.Context, ingester *ring.IngesterDesc, streamTrackers []*streamTracker, pushTracker *pushTracker) {
func (d *Distributor) sendSamples(ctx context.Context, ingester ring.IngesterDesc, streamTrackers []*streamTracker, pushTracker *pushTracker) {
err := d.sendSamplesErr(ctx, ingester, streamTrackers)
// If we succeed, decrement each sample's pending count by one. If we reach
@ -189,7 +192,7 @@ func (d *Distributor) sendSamples(ctx context.Context, ingester *ring.IngesterDe
}
// TODO taken from Cortex, see if we can refactor out a usable interface.
func (d *Distributor) sendSamplesErr(ctx context.Context, ingester *ring.IngesterDesc, streams []*streamTracker) error {
func (d *Distributor) sendSamplesErr(ctx context.Context, ingester ring.IngesterDesc, streams []*streamTracker) error {
c, err := d.pool.GetClientFor(ingester.Addr)
if err != nil {
return err

@ -3,7 +3,7 @@ package distributor
import (
"net/http"
"github.com/weaveworks/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util"
"github.com/grafana/tempo/pkg/logproto"
)

@ -44,6 +44,7 @@ func New(cfg Config, addr string) (grpc_health_v1.HealthClient, error) {
return struct {
logproto.PusherClient
logproto.QuerierClient
grpc_health_v1.HealthClient
io.Closer
}{
PusherClient: logproto.NewPusherClient(conn),

@ -6,8 +6,8 @@ import (
"net/http"
"sync"
"github.com/cortexproject/cortex/pkg/ring"
"github.com/weaveworks/common/user"
"github.com/weaveworks/cortex/pkg/ring"
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/grafana/tempo/pkg/logproto"
@ -57,7 +57,7 @@ func (i *Ingester) Flush() {
}
func (i *Ingester) Transfer() error {
func (i *Ingester) TransferOut(context.Context) error {
return nil
}
@ -114,11 +114,15 @@ func (*Ingester) Check(ctx context.Context, req *grpc_health_v1.HealthCheckReque
return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil
}
func (*Ingester) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error {
return nil
}
// ReadinessHandler is used to indicate to k8s when the ingesters are ready for
// the addition or removal of another ingester. Returns 204 when the ingester is
// ready, 500 otherwise.
func (i *Ingester) ReadinessHandler(w http.ResponseWriter, r *http.Request) {
if i.lifecycler.IsReady() {
if i.lifecycler.IsReady(r.Context()) {
w.WriteHeader(http.StatusNoContent)
} else {
w.WriteHeader(http.StatusInternalServerError)

@ -2,6 +2,6 @@ package logproto
import (
// trick dep into including this, needed by the generated code.
_ "github.com/cortexproject/cortex/pkg/util/wire"
_ "github.com/gogo/protobuf/types"
_ "github.com/weaveworks/cortex/pkg/util/wire"
)

@ -4,11 +4,9 @@ package logproto;
import "google/protobuf/timestamp.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google.golang.org/grpc/health/grpc_health_v1/health.proto";
service Pusher {
rpc Push(PushRequest) returns (PushResponse) {};
rpc Check(grpc.health.v1.HealthCheckRequest) returns (grpc.health.v1.HealthCheckResponse);
}
service Querier {

@ -5,8 +5,9 @@ import (
"flag"
"time"
cortex_client "github.com/weaveworks/cortex/pkg/ingester/client"
"github.com/weaveworks/cortex/pkg/ring"
cortex_client "github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/util"
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/grafana/tempo/pkg/ingester/client"
@ -42,7 +43,7 @@ func New(cfg Config, ring ring.ReadRing) (*Querier, error) {
return &Querier{
cfg: cfg,
ring: ring,
pool: cortex_client.NewPool(cfg.PoolConfig, ring, factory),
pool: cortex_client.NewPool(cfg.PoolConfig, ring, factory, util.Logger),
}, nil
}
@ -56,7 +57,7 @@ func (q *Querier) forAllIngesters(f func(logproto.QuerierClient) (interface{}, e
resps, errs := make(chan interface{}), make(chan error)
for _, ingester := range replicationSet.Ingesters {
go func(ingester *ring.IngesterDesc) {
go func(ingester ring.IngesterDesc) {
client, err := q.pool.GetClientFor(ingester.Addr)
if err != nil {
errs <- err

Loading…
Cancel
Save