vendoring: update cortex to latest master (#938)

* update cortex to latest master

using v3.4.0-rc.1 for go.etcd.io/etcd
v0.0.2 for github.com/prometheus/procfs
v1.0.0 for github.com/prometheus/client_golang
latest master for github.com/weaveworks/common with changes from https://github.com/weaveworks/common/pull/153

* fixed failing tests

* use large instance for test and lint jobs in CircleCI

* running only 6 test binaries in parallel

* removed resource type change for CircleCI

* changed GOGC to 10 for lint makefile target
Branch: pull/946/head
Author: Sandeep Sukhani, 7 years ago; committed by Cyril Tovena
Parent: 9484632d3d
Commit: b687ec6e5b
Changed files:
1. Gopkg.lock (276)
2. Gopkg.toml (12)
3. Makefile (4)
4. pkg/distributor/distributor.go (4)
5. pkg/ingester/flush_test.go (15)
6. pkg/ingester/ingester_test.go (2)
7. pkg/ingester/transfer_test.go (18)
8. vendor/github.com/coreos/go-semver/LICENSE (202)
9. vendor/github.com/coreos/go-semver/NOTICE (5)
10. vendor/github.com/coreos/go-semver/semver/semver.go (296)
11. vendor/github.com/coreos/go-semver/semver/sort.go (38)
12. vendor/github.com/coreos/go-systemd/journal/journal.go (225)
13. vendor/github.com/coreos/pkg/capnslog/README.md (39)
14. vendor/github.com/coreos/pkg/capnslog/formatters.go (157)
15. vendor/github.com/coreos/pkg/capnslog/glog_formatter.go (96)
16. vendor/github.com/coreos/pkg/capnslog/init.go (49)
17. vendor/github.com/coreos/pkg/capnslog/init_windows.go (25)
18. vendor/github.com/coreos/pkg/capnslog/journald_formatter.go (68)
19. vendor/github.com/coreos/pkg/capnslog/log_hijack.go (39)
20. vendor/github.com/coreos/pkg/capnslog/logmap.go (245)
21. vendor/github.com/coreos/pkg/capnslog/pkg_logger.go (191)
22. vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go (65)
23. vendor/github.com/cortexproject/cortex/LICENSE (14)
24. vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go (28)
25. vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go (17)
26. vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go (6)
27. vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go (4)
28. vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go (36)
29. vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go (4)
30. vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go (21)
31. vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client_selector.go (131)
32. vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go (14)
33. vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go (4)
34. vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go (2)
35. vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go (8)
36. vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go (2)
37. vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/instrumentation.go (4)
38. vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go (2)
39. vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go (3)
40. vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go (7)
41. vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go (3)
42. vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go (18)
43. vendor/github.com/cortexproject/cortex/pkg/ingester/client/pool.go (4)
44. vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go (40)
45. vendor/github.com/cortexproject/cortex/pkg/ring/batch.go (14)
46. vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go (112)
47. vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/codec.go (52)
48. vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go (137)
49. vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/metrics.go (2)
50. vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go (12)
51. vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go (196)
52. vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/mock.go (81)
53. vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go (53)
54. vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go (39)
55. vendor/github.com/cortexproject/cortex/pkg/ring/kvstore.go (109)
56. vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go (29)
57. vendor/github.com/cortexproject/cortex/pkg/ring/model.go (7)
58. vendor/github.com/cortexproject/cortex/pkg/ring/ring.go (10)
59. vendor/github.com/cortexproject/cortex/pkg/util/flagext/stringslice.go (19)
60. vendor/github.com/cortexproject/cortex/pkg/util/hash_bucket_histogram.go (120)
61. vendor/github.com/cortexproject/cortex/pkg/util/http.go (28)
62. vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go (188)
63. vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go (307)
64. vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go (20)
65. vendor/github.com/dustin/go-humanize/.travis.yml (21)
66. vendor/github.com/dustin/go-humanize/LICENSE (21)
67. vendor/github.com/dustin/go-humanize/README.markdown (124)
68. vendor/github.com/dustin/go-humanize/big.go (31)
69. vendor/github.com/dustin/go-humanize/bigbytes.go (173)
70. vendor/github.com/dustin/go-humanize/bytes.go (143)
71. vendor/github.com/dustin/go-humanize/comma.go (116)
72. vendor/github.com/dustin/go-humanize/commaf.go (40)
73. vendor/github.com/dustin/go-humanize/ftoa.go (46)
74. vendor/github.com/dustin/go-humanize/humanize.go (8)
75. vendor/github.com/dustin/go-humanize/number.go (192)
76. vendor/github.com/dustin/go-humanize/ordinals.go (25)
77. vendor/github.com/dustin/go-humanize/si.go (123)
78. vendor/github.com/dustin/go-humanize/times.go (117)
79. vendor/github.com/etcd-io/bbolt/Makefile (30)
80. vendor/github.com/facette/natsort/LICENSE (29)
81. vendor/github.com/facette/natsort/README.md (104)
82. vendor/github.com/facette/natsort/natsort.go (85)
83. vendor/github.com/golang/protobuf/jsonpb/jsonpb.go (1271)
84. vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go (3086)
85. vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go (117)
86. vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go (369)
87. vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden (83)
88. vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto (167)
89. vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go (450)
90. vendor/github.com/golang/protobuf/ptypes/struct/struct.proto (96)
91. vendor/github.com/google/uuid/.travis.yml (9)
92. vendor/github.com/google/uuid/CONTRIBUTING.md (10)
93. vendor/github.com/google/uuid/CONTRIBUTORS (9)
94. vendor/github.com/google/uuid/LICENSE (27)
95. vendor/github.com/google/uuid/README.md (19)
96. vendor/github.com/google/uuid/dce.go (80)
97. vendor/github.com/google/uuid/doc.go (12)
98. vendor/github.com/google/uuid/go.mod (1)
99. vendor/github.com/google/uuid/hash.go (53)
100. vendor/github.com/google/uuid/marshal.go (37)
Some files were not shown because too many files have changed in this diff.

Gopkg.lock (generated, 276)

@ -213,10 +213,19 @@
revision = "a9fb20d87448d386e6d50b1f2e1fa70dcf0de43c"
[[projects]]
digest = "1:bc38b83376aa09bdc1e889c00ce73cb748b2140d535bb5c76cb9823da6c7a98a"
digest = "1:05ffeeed3f0f05520de0679f6aa3219ffee69cfd6d9fb6c194879d4c818ad670"
name = "github.com/coreos/go-semver"
packages = ["semver"]
pruneopts = "UT"
revision = "e214231b295a8ea9479f11b70b35d5acf3556d9b"
version = "v0.3.0"
[[projects]]
digest = "1:971a6c7e578f323beb97deeecefe8dca02bacba70d675d3d14284f7467a45151"
name = "github.com/coreos/go-systemd"
packages = [
"activation",
"journal",
"sdjournal",
]
pruneopts = "UT"
@ -224,16 +233,19 @@
version = "v19"
[[projects]]
digest = "1:6e2ff82d2fe11ee35ec8dceb4346b8144a761f1c8655592c4ebe99a92fcec327"
digest = "1:129a158ba1ebf652f53b189d61dcf9fbfca8ac70b36bcb48a501200a21fb6086"
name = "github.com/coreos/pkg"
packages = ["dlopen"]
packages = [
"capnslog",
"dlopen",
]
pruneopts = "UT"
revision = "97fdf19511ea361ae1c100dd393cc47f8dcfa1e1"
version = "v4"
[[projects]]
branch = "master"
digest = "1:5a07b5363e4c2aa127a3afd1e8e323d3a288ba1d90d37793d2e14843f5b5b82e"
digest = "1:fdc932ff5ac5519eb816057507cb79a6addb23c722c03cfeec05aed44b53c96f"
name = "github.com/cortexproject/cortex"
packages = [
"pkg/chunk",
@ -250,6 +262,10 @@
"pkg/ingester/index",
"pkg/prom1/storage/metric",
"pkg/ring",
"pkg/ring/kv",
"pkg/ring/kv/codec",
"pkg/ring/kv/consul",
"pkg/ring/kv/etcd",
"pkg/util",
"pkg/util/extract",
"pkg/util/flagext",
@ -259,7 +275,7 @@
"pkg/util/validation",
]
pruneopts = "UT"
revision = "ef492f6bbafb185bbe61ae7a6955b7a4af5f3d9a"
revision = "934998160dbec7322c1ddfd70342a7aca68177f2"
[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
@ -365,12 +381,20 @@
version = "v0.4.0"
[[projects]]
digest = "1:3762d59edaa6e5c71d5e594c020c8391f274ff283e9c30fb43c518ec59a3f9b3"
name = "github.com/etcd-io/bbolt"
digest = "1:6f9339c912bbdda81302633ad7e99a28dfa5a639c864061f1929510a9a64aa74"
name = "github.com/dustin/go-humanize"
packages = ["."]
pruneopts = "UT"
revision = "7ee3ded59d4835e10f3e7d0f7603c42aa5e83820"
version = "v1.3.1-etcd.8"
revision = "9f541cc9db5d55bce703bd99987c9d5cb8eea45e"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:66dafe158a72d309ec48d983ee96b603064595fb123337e104159c835450e14d"
name = "github.com/facette/natsort"
packages = ["."]
pruneopts = "UT"
revision = "2cd4dd1e2dcba4d85d6d3ead4adf4cfd2b70caf2"
[[projects]]
digest = "1:865079840386857c809b72ce300be7580cb50d3d3129ce11bf9aa6ca2bc1934a"
@ -463,15 +487,20 @@
version = "v1.0.3"
[[projects]]
digest = "1:82db3b76eba1b5f200b304fd5593770998f3e7362d09172c9632d14e952bb0d8"
digest = "1:e3180bea674449a3d8c40341b39423e44e79d68ddb200f94f5c0020a42685ee8"
name = "github.com/golang/protobuf"
packages = [
"jsonpb",
"proto",
"protoc-gen-go/descriptor",
"protoc-gen-go/generator",
"protoc-gen-go/generator/internal/remap",
"protoc-gen-go/plugin",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/empty",
"ptypes/struct",
"ptypes/timestamp",
"ptypes/wrappers",
]
@ -503,6 +532,14 @@
pruneopts = "UT"
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
digest = "1:582b704bebaa06b48c29b0cec224a6058a09c86883aaddabde889cd1a5f73e1b"
name = "github.com/google/uuid"
packages = ["."]
pruneopts = "UT"
revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4"
version = "v1.1.1"
[[projects]]
digest = "1:cd9864c6366515827a759931746738ede6079faa08df9c584596370d6add135c"
name = "github.com/googleapis/gax-go"
@ -579,6 +616,26 @@
revision = "c250d6563d4d4c20252cd865923440e829844f4e"
version = "v1.0.0"
[[projects]]
digest = "1:9b7a07ac7577787a8ecc1334cb9f34df1c76ed82a917d556c5713d3ab84fbc43"
name = "github.com/grpc-ecosystem/go-grpc-prometheus"
packages = ["."]
pruneopts = "UT"
revision = "c225b8c3b01faf2899099b768856a9e916e5087b"
version = "v1.2.0"
[[projects]]
digest = "1:3b341cd71012c63aacddabfc70b9110be8e30c553349552ad3f77242843f2d03"
name = "github.com/grpc-ecosystem/grpc-gateway"
packages = [
"internal",
"runtime",
"utilities",
]
pruneopts = "UT"
revision = "fd2d159495beeea56e2b747d3d4b68299a81210c"
version = "v1.9.6"
[[projects]]
branch = "master"
digest = "1:1a1206efd03a54d336dce7bb8719e74f2f8932f661cb9f57d5813a1d99c083d8"
@ -660,6 +717,14 @@
pruneopts = "UT"
revision = "c2b33e84"
[[projects]]
digest = "1:75ab90ae3f5d876167e60f493beadfe66f0ed861a710f283fb06c86437a09538"
name = "github.com/jonboulle/clockwork"
packages = ["."]
pruneopts = "UT"
revision = "2eee05ed794112d45db504eb05aa693efd2b8b09"
version = "v0.1.0"
[[projects]]
digest = "1:3e551bbb3a7c0ab2a2bf4660e7fcad16db089fdcfbb44b0199e62838038623ea"
name = "github.com/json-iterator/go"
@ -871,8 +936,7 @@
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:dff66fce6bb8fa6111998bf3575e22aa3d0fd2560712a83de3390236d3cd1f6b"
digest = "1:228aa85ee836d8002915c08b4dcb4257a465e8aae48b5463171403177635b51d"
name = "github.com/prometheus/client_golang"
packages = [
"api",
@ -884,7 +948,8 @@
"prometheus/testutil",
]
pruneopts = "UT"
revision = "c5e14697eaa7af985b2e326f1f5ed50bacb75c06"
revision = "170205fb58decfd011f1550d4cfb737230d7ae4f"
version = "v1.1.0"
[[projects]]
branch = "master"
@ -909,17 +974,16 @@
revision = "2998b132700a7d019ff618c06a234b47c1f3f681"
[[projects]]
branch = "master"
digest = "1:08eb8b60450efe841e37512d66ce366a87d187505d7c67b99307a6c1803483a2"
digest = "1:8232537905152d6a0b116b9af5a0868fcac0e84eb02ec5a150624c077bdedb0b"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/fs",
"internal/util",
"nfs",
"xfs",
]
pruneopts = "UT"
revision = "b1a0a9a36d7453ba0f62578b99712f3a6c5f82d1"
revision = "00ec24a6a2d86e7074629c8384715dbb05adccd8"
version = "v0.0.4"
[[projects]]
branch = "master"
@ -1022,6 +1086,22 @@
revision = "e1e72e9de974bd926e5c56f83753fba2df402ce5"
version = "v1.3.0"
[[projects]]
digest = "1:30e06e6d62a1d694e3cdbff29d8a9a96022e05a487d0c6eaf0ef898965ef28fb"
name = "github.com/soheilhy/cmux"
packages = ["."]
pruneopts = "UT"
revision = "e09e9389d85d8492d313d73d1469c029e710623f"
version = "v0.1.4"
[[projects]]
digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "UT"
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
[[projects]]
digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02"
name = "github.com/stretchr/objx"
@ -1042,6 +1122,14 @@
revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
version = "v1.3.0"
[[projects]]
branch = "master"
digest = "1:d4e58a50951cee7efe774acb441b5a3b75c99f9fd8cc6d58a3146b4de190e14d"
name = "github.com/tmc/grpc-websocket-proxy"
packages = ["wsproxy"]
pruneopts = "UT"
revision = "0ad062ec5ee553a48f6dbd280b7a1b5638e8a113"
[[projects]]
branch = "master"
digest = "1:9ab2182297ebe5a1433c9804ba65a382e4f41e3084485d1b4d31996e4c992e38"
@ -1089,7 +1177,7 @@
[[projects]]
branch = "server-listen-addr"
digest = "1:0184d699d4cbbbc3073fbbdd7cc0c8592d484c6f23914c89fb6d218e90de171a"
digest = "1:ac8d70a9fb648c76c20d099821664d0b18dfdfbd46029decdb92f31314c1ffa5"
name = "github.com/weaveworks/common"
packages = [
"aws",
@ -1106,8 +1194,8 @@
"user",
]
pruneopts = "UT"
revision = "5bf824591a6567784789cf9b2169f74f162bf80d"
source = "https://github.com/tomwilkie/weaveworks-common"
revision = "8fa0a1ca9d89bb0e89f3b28022b0b20a8cbae0a8"
source = "https://github.com/sandlis/weaveworks-common"
[[projects]]
digest = "1:bb40f7ff970145324f2a2acafdff3a23ed3f05db49cb5eb519b3d6bee86a5887"
@ -1117,6 +1205,107 @@
revision = "0599d764e054d4e983bb120e30759179fafe3942"
version = "v1.2.0"
[[projects]]
digest = "1:36775a135c00ff94c2ab9d4de842ae9bf95f45d2159ade2b033f3ecbafa69423"
name = "github.com/xiang90/probing"
packages = ["."]
pruneopts = "UT"
revision = "43a291ad63a214a207fefbf03c7d9d78b703162b"
version = "0.0.2"
[[projects]]
digest = "1:f2ac2c724fc8214bb7b9dd6d4f5b7a983152051f5133320f228557182263cb94"
name = "go.etcd.io/bbolt"
packages = ["."]
pruneopts = "UT"
revision = "a0458a2b35708eef59eb5f620ceb3cd1c01a824d"
version = "v1.3.3"
[[projects]]
digest = "1:9a09f4299b5d5546c45b892c8f8ec733d7e7d4d113b4f6aa620f6b9ac5dede6b"
name = "go.etcd.io/etcd"
packages = [
"auth",
"auth/authpb",
"client",
"clientv3",
"clientv3/balancer",
"clientv3/balancer/connectivity",
"clientv3/balancer/picker",
"clientv3/balancer/resolver/endpoint",
"clientv3/concurrency",
"clientv3/credentials",
"embed",
"etcdserver",
"etcdserver/api",
"etcdserver/api/etcdhttp",
"etcdserver/api/membership",
"etcdserver/api/rafthttp",
"etcdserver/api/snap",
"etcdserver/api/snap/snappb",
"etcdserver/api/v2auth",
"etcdserver/api/v2discovery",
"etcdserver/api/v2error",
"etcdserver/api/v2http",
"etcdserver/api/v2http/httptypes",
"etcdserver/api/v2stats",
"etcdserver/api/v2store",
"etcdserver/api/v2v3",
"etcdserver/api/v3alarm",
"etcdserver/api/v3client",
"etcdserver/api/v3compactor",
"etcdserver/api/v3election",
"etcdserver/api/v3election/v3electionpb",
"etcdserver/api/v3election/v3electionpb/gw",
"etcdserver/api/v3lock",
"etcdserver/api/v3lock/v3lockpb",
"etcdserver/api/v3lock/v3lockpb/gw",
"etcdserver/api/v3rpc",
"etcdserver/api/v3rpc/rpctypes",
"etcdserver/etcdserverpb",
"etcdserver/etcdserverpb/gw",
"lease",
"lease/leasehttp",
"lease/leasepb",
"mvcc",
"mvcc/backend",
"mvcc/mvccpb",
"pkg/adt",
"pkg/contention",
"pkg/cpuutil",
"pkg/crc",
"pkg/debugutil",
"pkg/fileutil",
"pkg/flags",
"pkg/httputil",
"pkg/idutil",
"pkg/ioutil",
"pkg/logutil",
"pkg/netutil",
"pkg/pathutil",
"pkg/pbutil",
"pkg/runtime",
"pkg/schedule",
"pkg/srv",
"pkg/systemd",
"pkg/tlsutil",
"pkg/transport",
"pkg/types",
"pkg/wait",
"proxy/grpcproxy/adapter",
"raft",
"raft/confchange",
"raft/quorum",
"raft/raftpb",
"raft/tracker",
"version",
"wal",
"wal/walpb",
]
pruneopts = "UT"
revision = "67d0c21bb04c19ef1c76c63549c776bde3d3ee90"
version = "v3.4.0-rc.2"
[[projects]]
digest = "1:86b11d1e4dd05cd44d14b1e30b0497f98a37f696e8740ae88383de56d766cb34"
name = "go.opencensus.io"
@ -1142,11 +1331,44 @@
revision = "b7bf3cdb64150a8c8c53b769fdeb2ba581bd4d4b"
version = "v0.18.0"
[[projects]]
digest = "1:a5158647b553c61877aa9ae74f4015000294e47981e6b8b07525edcbb0747c81"
name = "go.uber.org/atomic"
packages = ["."]
pruneopts = "UT"
revision = "df976f2515e274675050de7b3f42545de80594fd"
version = "v1.4.0"
[[projects]]
digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a"
name = "go.uber.org/multierr"
packages = ["."]
pruneopts = "UT"
revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
version = "v1.1.0"
[[projects]]
digest = "1:676160e6a4722b08e0e26b11521d575c2cb2b6f0c679e1ee6178c5d8dee51e5e"
name = "go.uber.org/zap"
packages = [
".",
"buffer",
"internal/bufferpool",
"internal/color",
"internal/exit",
"zapcore",
]
pruneopts = "UT"
revision = "27376062155ad36be76b0f12cf1572a221d3a48c"
version = "v1.10.0"
[[projects]]
branch = "master"
digest = "1:cfd661f1a52594117f2a753bb640a86d4dbf3e0d778c2641bfbc750e6a1c8be7"
digest = "1:0f1e4f4c52bb58cfc3f6b8a22e62470fcc2faf83563fdda3123b33ef48fdf61f"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"ed25519",
"ed25519/internal/edwards25519",
"ssh/terminal",
@ -1292,10 +1514,11 @@
[[projects]]
branch = "master"
digest = "1:919def2c06d4700acfffdca102f912ec76fae5f026535934f921654d19e89cd8"
digest = "1:a01247cd16908f70cd015398c12fc7c5a73310e5f1c81e2657a654c4f245da6b"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/annotations",
"googleapis/api/httpbody",
"googleapis/bigtable/admin/v2",
"googleapis/bigtable/v2",
"googleapis/iam/v1",
@ -1308,7 +1531,7 @@
revision = "6909d8a4a91b6d3fd1c4580b6e35816be4706fef"
[[projects]]
digest = "1:10488712ab5f5da69a7da557c169824c247b63219af7f072318919ba352bee7a"
digest = "1:9e34ade43a63d48dba4be47f64368106f97938edd81f8af42aad3b2ecc01af3a"
name = "google.golang.org/grpc"
packages = [
".",
@ -1335,6 +1558,7 @@
"encoding/gzip",
"encoding/proto",
"grpclog",
"health",
"health/grpc_health_v1",
"internal",
"internal/backoff",
@ -1603,6 +1827,7 @@
"github.com/bmatcuk/doublestar",
"github.com/coreos/go-systemd/sdjournal",
"github.com/cortexproject/cortex/pkg/chunk",
"github.com/cortexproject/cortex/pkg/chunk/cache",
"github.com/cortexproject/cortex/pkg/chunk/encoding",
"github.com/cortexproject/cortex/pkg/chunk/local",
"github.com/cortexproject/cortex/pkg/chunk/storage",
@ -1611,7 +1836,10 @@
"github.com/cortexproject/cortex/pkg/ring",
"github.com/cortexproject/cortex/pkg/util",
"github.com/cortexproject/cortex/pkg/util/flagext",
"github.com/cortexproject/cortex/pkg/util/grpcclient",
"github.com/cortexproject/cortex/pkg/util/spanlogger",
"github.com/cortexproject/cortex/pkg/util/validation",
"github.com/davecgh/go-spew/spew",
"github.com/docker/docker/api/types/backend",
"github.com/docker/docker/api/types/plugins/logdriver",
"github.com/docker/docker/daemon/logger",

Gopkg.toml (12)
@ -30,7 +30,7 @@
[[constraint]]
name = "github.com/weaveworks/common"
source = "https://github.com/tomwilkie/weaveworks-common"
source = "https://github.com/sandlis/weaveworks-common"
branch = "server-listen-addr"
[[constraint]]
@ -56,7 +56,7 @@
name = "github.com/prometheus/common"
[[override]]
branch = "master"
version = "v1.0.0"
name = "github.com/prometheus/client_golang"
[[override]]
@ -78,3 +78,11 @@
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.3.0"
[[override]]
name = "go.etcd.io/etcd"
version = "v3.4.0-rc.1"
[[override]]
name = "github.com/prometheus/procfs"
version = "v0.0.2"

Makefile (4)
@ -207,14 +207,14 @@ publish: dist
########
lint:
GOGC=20 golangci-lint run
GOGC=10 golangci-lint run
########
# Test #
########
test: all
go test -p=8 ./...
go test -p=6 ./...
#########
# Clean #
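For context on the GOGC change above: GOGC is the Go garbage-collector target percentage, so lowering it from 20 to 10 makes golangci-lint collect much more often and keep a smaller heap, trading CPU time for memory headroom on the CI instance. A minimal, hypothetical sketch of the same knob set from inside a Go program (not part of this change):

// Illustration only: GOGC=10 in the environment is equivalent to calling
// debug.SetGCPercent(10) at startup.
package example

import "runtime/debug"

func lowerGCTarget() {
	// Returns the previous setting; the Go default is 100, and the old
	// lint target here was 20.
	previous := debug.SetGCPercent(10)
	_ = previous
}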

pkg/distributor/distributor.go (4)
@ -152,7 +152,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
entries := make([]logproto.Entry, 0, len(stream.Entries))
for _, entry := range stream.Entries {
if err := d.overrides.ValidateSample(userID, metricName, cortex_client.Sample{
if err := validation.ValidateSample(d.overrides, userID, metricName, cortex_client.Sample{
TimestampMs: entry.Timestamp.UnixNano() / int64(time.Millisecond),
}); err != nil {
validationErr = err
@ -224,7 +224,7 @@ func (d *Distributor) validateLabels(userID, labels string) error {
return err
}
return d.overrides.ValidateLabels(userID, ls)
return validation.ValidateLabels(d.overrides, userID, ls)
}
// TODO taken from Cortex, see if we can refactor out an usable interface.
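The distributor change reflects an API move in the updated Cortex: per-tenant validation is no longer a method on the overrides object but a package-level function in pkg/util/validation that takes the overrides as its first argument. A minimal sketch of the new call shape, assuming the Overrides type from the vendored validation package; the wrapper function itself is hypothetical:

// Sketch only: mirrors the per-entry check in Distributor.Push above.
package example

import (
	"time"

	cortex_client "github.com/cortexproject/cortex/pkg/ingester/client"
	"github.com/cortexproject/cortex/pkg/util/validation"
)

// validateEntryTimestamp shows the post-update call shape; previously this
// was overrides.ValidateSample(userID, metricName, sample).
func validateEntryTimestamp(overrides *validation.Overrides, userID, metricName string, ts time.Time) error {
	return validation.ValidateSample(overrides, userID, metricName, cortex_client.Sample{
		TimestampMs: ts.UnixNano() / int64(time.Millisecond),
	})
}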

pkg/ingester/flush_test.go (15)
@ -7,6 +7,9 @@ import (
"testing"
"time"
"github.com/cortexproject/cortex/pkg/ring/kv"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/util/flagext"
@ -31,7 +34,7 @@ func init() {
}
func TestChunkFlushingIdle(t *testing.T) {
cfg := defaultIngesterTestConfig()
cfg := defaultIngesterTestConfig(t)
cfg.FlushCheckPeriod = 20 * time.Millisecond
cfg.MaxChunkIdle = 100 * time.Millisecond
cfg.RetainPeriod = 500 * time.Millisecond
@ -45,7 +48,7 @@ func TestChunkFlushingIdle(t *testing.T) {
}
func TestChunkFlushingShutdown(t *testing.T) {
store, ing := newTestStore(t, defaultIngesterTestConfig())
store, ing := newTestStore(t, defaultIngesterTestConfig(t))
userIDs, testData := pushTestSamples(t, ing)
ing.Shutdown()
store.checkData(t, userIDs, testData)
@ -69,14 +72,16 @@ func newTestStore(t require.TestingT, cfg Config) (*testStore, *Ingester) {
}
// nolint
func defaultIngesterTestConfig() Config {
consul := ring.NewInMemoryKVClient(ring.ProtoCodec{Factory: ring.ProtoDescFactory})
func defaultIngesterTestConfig(t *testing.T) Config {
kvClient, err := kv.NewClient(kv.Config{Store: "inmemory"}, codec.Proto{Factory: ring.ProtoDescFactory})
require.NoError(t, err)
cfg := Config{}
flagext.DefaultValues(&cfg)
cfg.FlushCheckPeriod = 99999 * time.Hour
cfg.MaxChunkIdle = 99999 * time.Hour
cfg.ConcurrentFlushes = 1
cfg.LifecyclerConfig.RingConfig.KVStore.Mock = consul
cfg.LifecyclerConfig.RingConfig.KVStore.Mock = kvClient
cfg.LifecyclerConfig.NumTokens = 1
cfg.LifecyclerConfig.ListenPort = func(i int) *int { return &i }(0)
cfg.LifecyclerConfig.Addr = "localhost"
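The test change tracks another Cortex refactor: the ring KV abstraction moved from ring.NewInMemoryKVClient and ring.ProtoCodec to the new pkg/ring/kv package, where a client is built via kv.NewClient from a store name and a codec. A small self-contained sketch of the construction used by defaultIngesterTestConfig above:

// Sketch of the new in-memory KV client construction used by the tests above.
package example

import (
	"github.com/cortexproject/cortex/pkg/ring"
	"github.com/cortexproject/cortex/pkg/ring/kv"
	"github.com/cortexproject/cortex/pkg/ring/kv/codec"
)

func newInMemoryRingKV() (kv.Client, error) {
	// "inmemory" selects the in-process store; the codec controls how ring
	// descriptors are serialized, exactly as in defaultIngesterTestConfig.
	return kv.NewClient(kv.Config{Store: "inmemory"}, codec.Proto{Factory: ring.ProtoDescFactory})
}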

pkg/ingester/ingester_test.go (2)
@ -17,7 +17,7 @@ import (
)
func TestIngester(t *testing.T) {
ingesterConfig := defaultIngesterTestConfig()
ingesterConfig := defaultIngesterTestConfig(t)
store := &mockStore{
chunks: map[string][]chunk.Chunk{},
}

pkg/ingester/transfer_test.go (18)
@ -12,6 +12,8 @@ import (
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/ring/kv"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/weaveworks/common/user"
@ -24,7 +26,7 @@ import (
func TestTransferOut(t *testing.T) {
f := newTestIngesterFactory(t)
ing := f.getIngester(time.Duration(0))
ing := f.getIngester(time.Duration(0), t)
// Push some data into our original ingester
ctx := user.InjectOrgID(context.Background(), "test")
@ -54,7 +56,7 @@ func TestTransferOut(t *testing.T) {
}
// Create a new ingester and trasfer data to it
ing2 := f.getIngester(time.Second * 60)
ing2 := f.getIngester(time.Second*60, t)
ing.Shutdown()
assert.Len(t, ing2.instances, 1)
@ -93,25 +95,27 @@ func TestTransferOut(t *testing.T) {
type testIngesterFactory struct {
t *testing.T
store ring.KVClient
store kv.Client
n int
ingesters map[string]*Ingester
}
func newTestIngesterFactory(t *testing.T) *testIngesterFactory {
kvClient, err := kv.NewClient(kv.Config{Store: "inmemory"}, codec.Proto{Factory: ring.ProtoDescFactory})
require.NoError(t, err)
return &testIngesterFactory{
t: t,
store: ring.NewInMemoryKVClient(ring.ProtoCodec{Factory: ring.ProtoDescFactory}),
store: kvClient,
ingesters: make(map[string]*Ingester),
}
}
func (f *testIngesterFactory) getIngester(joinAfter time.Duration) *Ingester {
func (f *testIngesterFactory) getIngester(joinAfter time.Duration, t *testing.T) *Ingester {
f.n++
cfg := defaultIngesterTestConfig()
cfg := defaultIngesterTestConfig(t)
cfg.MaxTransferRetries = 1
cfg.LifecyclerConfig.ClaimOnRollout = true
cfg.LifecyclerConfig.ID = fmt.Sprintf("localhost-%d", f.n)
cfg.LifecyclerConfig.RingConfig.KVStore.Mock = f.store
cfg.LifecyclerConfig.JoinAfter = joinAfter

vendor/github.com/coreos/go-semver/LICENSE (202)
@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/coreos/go-semver/NOTICE (5)
@ -0,0 +1,5 @@
CoreOS Project
Copyright 2018 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).

vendor/github.com/coreos/go-semver/semver/semver.go (296)
@ -0,0 +1,296 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Semantic Versions http://semver.org
package semver
import (
"bytes"
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
type Version struct {
Major int64
Minor int64
Patch int64
PreRelease PreRelease
Metadata string
}
type PreRelease string
func splitOff(input *string, delim string) (val string) {
parts := strings.SplitN(*input, delim, 2)
if len(parts) == 2 {
*input = parts[0]
val = parts[1]
}
return val
}
func New(version string) *Version {
return Must(NewVersion(version))
}
func NewVersion(version string) (*Version, error) {
v := Version{}
if err := v.Set(version); err != nil {
return nil, err
}
return &v, nil
}
// Must is a helper for wrapping NewVersion and will panic if err is not nil.
func Must(v *Version, err error) *Version {
if err != nil {
panic(err)
}
return v
}
// Set parses and updates v from the given version string. Implements flag.Value
func (v *Version) Set(version string) error {
metadata := splitOff(&version, "+")
preRelease := PreRelease(splitOff(&version, "-"))
dotParts := strings.SplitN(version, ".", 3)
if len(dotParts) != 3 {
return fmt.Errorf("%s is not in dotted-tri format", version)
}
if err := validateIdentifier(string(preRelease)); err != nil {
return fmt.Errorf("failed to validate pre-release: %v", err)
}
if err := validateIdentifier(metadata); err != nil {
return fmt.Errorf("failed to validate metadata: %v", err)
}
parsed := make([]int64, 3, 3)
for i, v := range dotParts[:3] {
val, err := strconv.ParseInt(v, 10, 64)
parsed[i] = val
if err != nil {
return err
}
}
v.Metadata = metadata
v.PreRelease = preRelease
v.Major = parsed[0]
v.Minor = parsed[1]
v.Patch = parsed[2]
return nil
}
func (v Version) String() string {
var buffer bytes.Buffer
fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
if v.PreRelease != "" {
fmt.Fprintf(&buffer, "-%s", v.PreRelease)
}
if v.Metadata != "" {
fmt.Fprintf(&buffer, "+%s", v.Metadata)
}
return buffer.String()
}
func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
var data string
if err := unmarshal(&data); err != nil {
return err
}
return v.Set(data)
}
func (v Version) MarshalJSON() ([]byte, error) {
return []byte(`"` + v.String() + `"`), nil
}
func (v *Version) UnmarshalJSON(data []byte) error {
l := len(data)
if l == 0 || string(data) == `""` {
return nil
}
if l < 2 || data[0] != '"' || data[l-1] != '"' {
return errors.New("invalid semver string")
}
return v.Set(string(data[1 : l-1]))
}
// Compare tests if v is less than, equal to, or greater than versionB,
// returning -1, 0, or +1 respectively.
func (v Version) Compare(versionB Version) int {
if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
return cmp
}
return preReleaseCompare(v, versionB)
}
// Equal tests if v is equal to versionB.
func (v Version) Equal(versionB Version) bool {
return v.Compare(versionB) == 0
}
// LessThan tests if v is less than versionB.
func (v Version) LessThan(versionB Version) bool {
return v.Compare(versionB) < 0
}
// Slice converts the comparable parts of the semver into a slice of integers.
func (v Version) Slice() []int64 {
return []int64{v.Major, v.Minor, v.Patch}
}
func (p PreRelease) Slice() []string {
preRelease := string(p)
return strings.Split(preRelease, ".")
}
func preReleaseCompare(versionA Version, versionB Version) int {
a := versionA.PreRelease
b := versionB.PreRelease
/* Handle the case where if two versions are otherwise equal it is the
* one without a PreRelease that is greater */
if len(a) == 0 && (len(b) > 0) {
return 1
} else if len(b) == 0 && (len(a) > 0) {
return -1
}
// If there is a prerelease, check and compare each part.
return recursivePreReleaseCompare(a.Slice(), b.Slice())
}
func recursiveCompare(versionA []int64, versionB []int64) int {
if len(versionA) == 0 {
return 0
}
a := versionA[0]
b := versionB[0]
if a > b {
return 1
} else if a < b {
return -1
}
return recursiveCompare(versionA[1:], versionB[1:])
}
func recursivePreReleaseCompare(versionA []string, versionB []string) int {
// A larger set of pre-release fields has a higher precedence than a smaller set,
// if all of the preceding identifiers are equal.
if len(versionA) == 0 {
if len(versionB) > 0 {
return -1
}
return 0
} else if len(versionB) == 0 {
// We're longer than versionB so return 1.
return 1
}
a := versionA[0]
b := versionB[0]
aInt := false
bInt := false
aI, err := strconv.Atoi(versionA[0])
if err == nil {
aInt = true
}
bI, err := strconv.Atoi(versionB[0])
if err == nil {
bInt = true
}
// Numeric identifiers always have lower precedence than non-numeric identifiers.
if aInt && !bInt {
return -1
} else if !aInt && bInt {
return 1
}
// Handle Integer Comparison
if aInt && bInt {
if aI > bI {
return 1
} else if aI < bI {
return -1
}
}
// Handle String Comparison
if a > b {
return 1
} else if a < b {
return -1
}
return recursivePreReleaseCompare(versionA[1:], versionB[1:])
}
// BumpMajor increments the Major field by 1 and resets all other fields to their default values
func (v *Version) BumpMajor() {
v.Major += 1
v.Minor = 0
v.Patch = 0
v.PreRelease = PreRelease("")
v.Metadata = ""
}
// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
func (v *Version) BumpMinor() {
v.Minor += 1
v.Patch = 0
v.PreRelease = PreRelease("")
v.Metadata = ""
}
// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
func (v *Version) BumpPatch() {
v.Patch += 1
v.PreRelease = PreRelease("")
v.Metadata = ""
}
// validateIdentifier makes sure the provided identifier satisfies semver spec
func validateIdentifier(id string) error {
if id != "" && !reIdentifier.MatchString(id) {
return fmt.Errorf("%s is not a valid semver identifier", id)
}
return nil
}
// reIdentifier is a regular expression used to check that pre-release and metadata
// identifiers satisfy the spec requirements
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
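Since go-semver is newly vendored, a short usage sketch of the API defined above (illustrative only, not part of the vendored code):

// Usage sketch for the vendored go-semver package above.
package example

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func compareVersions() {
	a := semver.New("3.4.0-rc.1") // Must(NewVersion(...)): panics on invalid input
	b, err := semver.NewVersion("3.4.0-rc.2")
	if err != nil {
		panic(err)
	}

	fmt.Println(a.LessThan(*b)) // true: pre-release rc.1 sorts before rc.2
	fmt.Println(a.Compare(*b))  // -1

	versions := []*semver.Version{b, a}
	semver.Sort(versions) // ascending: 3.4.0-rc.1, 3.4.0-rc.2
}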

vendor/github.com/coreos/go-semver/semver/sort.go (38)
@ -0,0 +1,38 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semver
import (
"sort"
)
type Versions []*Version
func (s Versions) Len() int {
return len(s)
}
func (s Versions) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s Versions) Less(i, j int) bool {
return s[i].LessThan(*s[j])
}
// Sort sorts the given slice of Version
func Sort(versions []*Version) {
sort.Sort(Versions(versions))
}

vendor/github.com/coreos/go-systemd/journal/journal.go (225)
@ -0,0 +1,225 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package journal provides write bindings to the local systemd journal.
// It is implemented in pure Go and connects to the journal directly over its
// unix socket.
//
// To read from the journal, see the "sdjournal" package, which wraps the
// sd-journal a C API.
//
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
package journal
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"unsafe"
)
// Priority of a journal message
type Priority int
const (
PriEmerg Priority = iota
PriAlert
PriCrit
PriErr
PriWarning
PriNotice
PriInfo
PriDebug
)
var (
// This can be overridden at build-time:
// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
journalSocket = "/run/systemd/journal/socket"
// unixConnPtr atomically holds the local unconnected Unix-domain socket.
// Concrete safe pointer type: *net.UnixConn
unixConnPtr unsafe.Pointer
// onceConn ensures that unixConnPtr is initialized exactly once.
onceConn sync.Once
)
func init() {
onceConn.Do(initConn)
}
// Enabled checks whether the local systemd journal is available for logging.
func Enabled() bool {
onceConn.Do(initConn)
if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
return false
}
if _, err := net.Dial("unixgram", journalSocket); err != nil {
return false
}
return true
}
// Send a message to the local systemd journal. vars is a map of journald
// fields to values. Fields must be composed of uppercase letters, numbers,
// and underscores, but must not start with an underscore. Within these
// restrictions, any arbitrary field name may be used. Some names have special
// significance: see the journalctl documentation
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
// for more details. vars may be nil.
func Send(message string, priority Priority, vars map[string]string) error {
conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
if conn == nil {
return errors.New("could not initialize socket to journald")
}
socketAddr := &net.UnixAddr{
Name: journalSocket,
Net: "unixgram",
}
data := new(bytes.Buffer)
appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
appendVariable(data, "MESSAGE", message)
for k, v := range vars {
appendVariable(data, k, v)
}
_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
if err == nil {
return nil
}
if !isSocketSpaceError(err) {
return err
}
// Large log entry, send it via tempfile and ancillary-fd.
file, err := tempFd()
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(file, data)
if err != nil {
return err
}
rights := syscall.UnixRights(int(file.Fd()))
_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
if err != nil {
return err
}
return nil
}
// Print prints a message to the local systemd journal using Send().
func Print(priority Priority, format string, a ...interface{}) error {
return Send(fmt.Sprintf(format, a...), priority, nil)
}
func appendVariable(w io.Writer, name, value string) {
if err := validVarName(name); err != nil {
fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
}
if strings.ContainsRune(value, '\n') {
/* When the value contains a newline, we write:
* - the variable name, followed by a newline
* - the size (in 64bit little endian format)
* - the data, followed by a newline
*/
fmt.Fprintln(w, name)
binary.Write(w, binary.LittleEndian, uint64(len(value)))
fmt.Fprintln(w, value)
} else {
/* just write the variable and value all on one line */
fmt.Fprintf(w, "%s=%s\n", name, value)
}
}
// validVarName validates a variable name to make sure journald will accept it.
// The variable name must be in uppercase and consist only of characters,
// numbers and underscores, and may not begin with an underscore:
// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
func validVarName(name string) error {
if name == "" {
return errors.New("Empty variable name")
} else if name[0] == '_' {
return errors.New("Variable name begins with an underscore")
}
for _, c := range name {
if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
return errors.New("Variable name contains invalid characters")
}
}
return nil
}
// isSocketSpaceError checks whether the error is signaling
// an "overlarge message" condition.
func isSocketSpaceError(err error) bool {
opErr, ok := err.(*net.OpError)
if !ok || opErr == nil {
return false
}
sysErr, ok := opErr.Err.(*os.SyscallError)
if !ok || sysErr == nil {
return false
}
return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
}
// tempFd creates a temporary, unlinked file under `/dev/shm`.
func tempFd() (*os.File, error) {
file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
if err != nil {
return nil, err
}
err = syscall.Unlink(file.Name())
if err != nil {
return nil, err
}
return file, nil
}
// initConn initializes the global `unixConnPtr` socket.
// It is meant to be called exactly once, at program startup.
func initConn() {
autobind, err := net.ResolveUnixAddr("unixgram", "")
if err != nil {
return
}
sock, err := net.ListenUnixgram("unixgram", autobind)
if err != nil {
return
}
atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
}
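A short usage sketch for the journal package above (illustrative only): check Enabled before writing, then send a message with optional uppercase journald fields.

// Usage sketch for the vendored journal package above.
package example

import "github.com/coreos/go-systemd/journal"

func logToJournald() error {
	if !journal.Enabled() {
		// No journald socket on this host; fall back to another logger.
		return nil
	}
	// Field names must be uppercase and must not start with an underscore.
	return journal.Send("chunk flush complete", journal.PriInfo, map[string]string{
		"COMPONENT": "ingester",
	})
}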

vendor/github.com/coreos/pkg/capnslog/README.md (39)
@ -0,0 +1,39 @@
# capnslog, the CoreOS logging package
There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?).
capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
### Design Principles
##### `package main` is the place where logging gets turned on and routed
A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
##### All log options are runtime-configurable.
Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly.
##### There is one log object per package. It is registered under its repository and package name.
`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
##### Log objects are an interface
An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
##### Log levels have specific meanings:
* Critical: Unrecoverable. Must fail.
* Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
* Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
* Notice: Normal, but important (uncommon) log information.
* Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
* Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices.
* Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
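To make the README concrete, a hedged wiring sketch: the formatter and level setters appear in init.go further down, while NewPackageLogger belongs to capnslog's API but is not shown in this excerpt, so treat that exact call as an assumption.

// Sketch of wiring capnslog up from package main, following the README above.
package example

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

// One log object per package, registered under repository and package name.
var plog = capnslog.NewPackageLogger("github.com/example/project", "ingester")

func configureLogging() {
	// main owns the single output stream and the global level.
	capnslog.SetFormatter(capnslog.NewStringFormatter(os.Stderr))
	capnslog.SetGlobalLogLevel(capnslog.INFO)
	plog.Infof("logging configured, pid %d", os.Getpid())
}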

vendor/github.com/coreos/pkg/capnslog/formatters.go (157)
@ -0,0 +1,157 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capnslog
import (
"bufio"
"fmt"
"io"
"log"
"runtime"
"strings"
"time"
)
type Formatter interface {
Format(pkg string, level LogLevel, depth int, entries ...interface{})
Flush()
}
func NewStringFormatter(w io.Writer) Formatter {
return &StringFormatter{
w: bufio.NewWriter(w),
}
}
type StringFormatter struct {
w *bufio.Writer
}
func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
now := time.Now().UTC()
s.w.WriteString(now.Format(time.RFC3339))
s.w.WriteByte(' ')
writeEntries(s.w, pkg, l, i, entries...)
s.Flush()
}
func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
if pkg != "" {
w.WriteString(pkg + ": ")
}
str := fmt.Sprint(entries...)
endsInNL := strings.HasSuffix(str, "\n")
w.WriteString(str)
if !endsInNL {
w.WriteString("\n")
}
}
func (s *StringFormatter) Flush() {
s.w.Flush()
}
func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
return &PrettyFormatter{
w: bufio.NewWriter(w),
debug: debug,
}
}
type PrettyFormatter struct {
w *bufio.Writer
debug bool
}
func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
now := time.Now()
ts := now.Format("2006-01-02 15:04:05")
c.w.WriteString(ts)
ms := now.Nanosecond() / 1000
c.w.WriteString(fmt.Sprintf(".%06d", ms))
if c.debug {
_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
if !ok {
file = "???"
line = 1
} else {
slash := strings.LastIndex(file, "/")
if slash >= 0 {
file = file[slash+1:]
}
}
if line < 0 {
line = 0 // not a real line number
}
c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
}
c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
writeEntries(c.w, pkg, l, depth, entries...)
c.Flush()
}
func (c *PrettyFormatter) Flush() {
c.w.Flush()
}
// LogFormatter emulates the form of the traditional built-in logger.
type LogFormatter struct {
logger *log.Logger
prefix string
}
// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the
// golang log package to actually do the logging work so that logs look similar.
func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter {
return &LogFormatter{
logger: log.New(w, "", flag), // don't use prefix here
prefix: prefix, // save it instead
}
}
// Format builds a log message for the LogFormatter. The LogLevel is ignored.
func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) {
str := fmt.Sprint(entries...)
prefix := lf.prefix
if pkg != "" {
prefix = fmt.Sprintf("%s%s: ", prefix, pkg)
}
lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
}
// Flush is included so that the interface is complete, but is a no-op.
func (lf *LogFormatter) Flush() {
// noop
}
// NilFormatter is a no-op log formatter that does nothing.
type NilFormatter struct {
}
// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no
// messages so that you can cause part of your logging to be silent.
func NewNilFormatter() Formatter {
return &NilFormatter{}
}
// Format does nothing.
func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
// noop
}
// Flush is included so that the interface is complete, but is a no-op.
func (_ *NilFormatter) Flush() {
// noop
}
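The Formatter interface above is capnslog's extension point; a minimal custom implementation (illustrative only) needs just Format and Flush, and can reuse LogLevel.Char the way the built-in formatters do.

// Minimal custom Formatter satisfying the interface defined above.
package example

import (
	"fmt"
	"io"

	"github.com/coreos/pkg/capnslog"
)

// prefixFormatter writes one "<level char> pkg: entries" line per call.
type prefixFormatter struct {
	w io.Writer
}

func (p *prefixFormatter) Format(pkg string, l capnslog.LogLevel, _ int, entries ...interface{}) {
	fmt.Fprintf(p.w, "%s %s: %s\n", l.Char(), pkg, fmt.Sprint(entries...))
}

// Flush is a no-op because nothing is buffered.
func (p *prefixFormatter) Flush() {}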

vendor/github.com/coreos/pkg/capnslog/glog_formatter.go (96)
@ -0,0 +1,96 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capnslog
import (
"bufio"
"bytes"
"io"
"os"
"runtime"
"strconv"
"strings"
"time"
)
var pid = os.Getpid()
type GlogFormatter struct {
StringFormatter
}
func NewGlogFormatter(w io.Writer) *GlogFormatter {
g := &GlogFormatter{}
g.w = bufio.NewWriter(w)
return g
}
func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
g.w.Write(GlogHeader(level, depth+1))
g.StringFormatter.Format(pkg, level, depth+1, entries...)
}
func GlogHeader(level LogLevel, depth int) []byte {
// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
now := time.Now().UTC()
_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
if !ok {
file = "???"
line = 1
} else {
slash := strings.LastIndex(file, "/")
if slash >= 0 {
file = file[slash+1:]
}
}
if line < 0 {
line = 0 // not a real line number
}
buf := &bytes.Buffer{}
buf.Grow(30)
_, month, day := now.Date()
hour, minute, second := now.Clock()
buf.WriteString(level.Char())
twoDigits(buf, int(month))
twoDigits(buf, day)
buf.WriteByte(' ')
twoDigits(buf, hour)
buf.WriteByte(':')
twoDigits(buf, minute)
buf.WriteByte(':')
twoDigits(buf, second)
buf.WriteByte('.')
buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
buf.WriteByte('Z')
buf.WriteByte(' ')
buf.WriteString(strconv.Itoa(pid))
buf.WriteByte(' ')
buf.WriteString(file)
buf.WriteByte(':')
buf.WriteString(strconv.Itoa(line))
buf.WriteByte(']')
buf.WriteByte(' ')
return buf.Bytes()
}
const digits = "0123456789"
func twoDigits(b *bytes.Buffer, d int) {
c2 := digits[d%10]
d /= 10
c1 := digits[d%10]
b.WriteByte(c1)
b.WriteByte(c2)
}

@ -0,0 +1,49 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !windows
package capnslog
import (
"io"
"os"
"syscall"
)
// Here's where the opinionation comes in. We need some sensible defaults,
// especially after taking over the log package. Your project (whatever it may
// be) may see things differently. That's okay; there should be no defaults in
// the main package that cannot be controlled or overridden programmatically,
// otherwise it's a bug. The way to do that is to create your own init_log.go
// file much like this one.
func init() {
initHijack()
// Go `log` package uses os.Stderr.
SetFormatter(NewDefaultFormatter(os.Stderr))
SetGlobalLogLevel(INFO)
}
func NewDefaultFormatter(out io.Writer) Formatter {
if syscall.Getppid() == 1 {
// We're running under init, which may be systemd.
f, err := NewJournaldFormatter()
if err == nil {
return f
}
}
return NewPrettyFormatter(out, false)
}
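
Not part of the vendored file: a minimal sketch of overriding these defaults programmatically, using only the capnslog APIs shown in this vendoring; the writer and level chosen here are illustrative.

package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

func main() {
	// Swap out the formatter picked by init() for a debug-enabled pretty
	// formatter on stdout, then raise the global level from the INFO default.
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, true))
	capnslog.SetGlobalLogLevel(capnslog.DEBUG)
}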

@ -0,0 +1,25 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capnslog
import "os"
func init() {
initHijack()
// Go `log` package uses os.Stderr.
SetFormatter(NewPrettyFormatter(os.Stderr, false))
SetGlobalLogLevel(INFO)
}

@ -0,0 +1,68 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !windows
package capnslog
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/coreos/go-systemd/journal"
)
func NewJournaldFormatter() (Formatter, error) {
if !journal.Enabled() {
return nil, errors.New("No systemd detected")
}
return &journaldFormatter{}, nil
}
type journaldFormatter struct{}
func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
var pri journal.Priority
switch l {
case CRITICAL:
pri = journal.PriCrit
case ERROR:
pri = journal.PriErr
case WARNING:
pri = journal.PriWarning
case NOTICE:
pri = journal.PriNotice
case INFO:
pri = journal.PriInfo
case DEBUG:
pri = journal.PriDebug
case TRACE:
pri = journal.PriDebug
default:
panic("Unhandled loglevel")
}
msg := fmt.Sprint(entries...)
tags := map[string]string{
"PACKAGE": pkg,
"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
}
err := journal.Send(msg, pri, tags)
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
}
func (j *journaldFormatter) Flush() {}

@ -0,0 +1,39 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capnslog
import (
"log"
)
func initHijack() {
pkg := NewPackageLogger("log", "")
w := packageWriter{pkg}
log.SetFlags(0)
log.SetPrefix("")
log.SetOutput(w)
}
type packageWriter struct {
pl *PackageLogger
}
func (p packageWriter) Write(b []byte) (int, error) {
if p.pl.level < INFO {
return 0, nil
}
p.pl.internalLog(calldepth+2, INFO, string(b))
return len(b), nil
}

@ -0,0 +1,245 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capnslog
import (
"errors"
"strings"
"sync"
)
// LogLevel is the set of all log levels.
type LogLevel int8
const (
// CRITICAL is the lowest log level; only errors which will end the program will be propagated.
CRITICAL LogLevel = iota - 1
// ERROR is for errors that are not fatal but lead to troubling behavior.
ERROR
// WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations.
WARNING
// NOTICE is for normal but significant conditions.
NOTICE
// INFO is a log level for common, everyday log updates.
INFO
// DEBUG is the default hidden level for more verbose updates about internal processes.
DEBUG
// TRACE is for (potentially) call by call tracing of programs.
TRACE
)
// Char returns a single-character representation of the log level.
func (l LogLevel) Char() string {
switch l {
case CRITICAL:
return "C"
case ERROR:
return "E"
case WARNING:
return "W"
case NOTICE:
return "N"
case INFO:
return "I"
case DEBUG:
return "D"
case TRACE:
return "T"
default:
panic("Unhandled loglevel")
}
}
// String returns a multi-character representation of the log level.
func (l LogLevel) String() string {
switch l {
case CRITICAL:
return "CRITICAL"
case ERROR:
return "ERROR"
case WARNING:
return "WARNING"
case NOTICE:
return "NOTICE"
case INFO:
return "INFO"
case DEBUG:
return "DEBUG"
case TRACE:
return "TRACE"
default:
panic("Unhandled loglevel")
}
}
// Update using the given string value. Fulfills the flag.Value interface.
func (l *LogLevel) Set(s string) error {
value, err := ParseLevel(s)
if err != nil {
return err
}
*l = value
return nil
}
// Returns an empty string, only here to fulfill the pflag.Value interface.
func (l *LogLevel) Type() string {
return ""
}
// ParseLevel translates some potential loglevel strings into their corresponding levels.
func ParseLevel(s string) (LogLevel, error) {
switch s {
case "CRITICAL", "C":
return CRITICAL, nil
case "ERROR", "0", "E":
return ERROR, nil
case "WARNING", "1", "W":
return WARNING, nil
case "NOTICE", "2", "N":
return NOTICE, nil
case "INFO", "3", "I":
return INFO, nil
case "DEBUG", "4", "D":
return DEBUG, nil
case "TRACE", "5", "T":
return TRACE, nil
}
return CRITICAL, errors.New("couldn't parse log level " + s)
}
type RepoLogger map[string]*PackageLogger
type loggerStruct struct {
sync.Mutex
repoMap map[string]RepoLogger
formatter Formatter
}
// logger is the global logger
var logger = new(loggerStruct)
// SetGlobalLogLevel sets the log level for all packages in all repositories
// registered with capnslog.
func SetGlobalLogLevel(l LogLevel) {
logger.Lock()
defer logger.Unlock()
for _, r := range logger.repoMap {
r.setRepoLogLevelInternal(l)
}
}
// GetRepoLogger may return the handle to the repository's set of packages' loggers.
func GetRepoLogger(repo string) (RepoLogger, error) {
logger.Lock()
defer logger.Unlock()
r, ok := logger.repoMap[repo]
if !ok {
return nil, errors.New("no packages registered for repo " + repo)
}
return r, nil
}
// MustRepoLogger returns the handle to the repository's packages' loggers.
func MustRepoLogger(repo string) RepoLogger {
r, err := GetRepoLogger(repo)
if err != nil {
panic(err)
}
return r
}
// SetRepoLogLevel sets the log level for all packages in the repository.
func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
logger.Lock()
defer logger.Unlock()
r.setRepoLogLevelInternal(l)
}
func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
for _, v := range r {
v.level = l
}
}
// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
// order, and returns a map of the results, for use in SetLogLevel.
func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
setlist := strings.Split(conf, ",")
out := make(map[string]LogLevel)
for _, setstring := range setlist {
setting := strings.Split(setstring, "=")
if len(setting) != 2 {
return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
}
l, err := ParseLevel(setting[1])
if err != nil {
return nil, err
}
out[setting[0]] = l
}
return out, nil
}
// SetLogLevel takes a map of package names within a repository to their desired
// loglevel, and sets the levels appropriately. Unknown packages are ignored.
// "*" is a special package name that corresponds to all packages, and will be
// processed first.
func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
logger.Lock()
defer logger.Unlock()
if l, ok := m["*"]; ok {
r.setRepoLogLevelInternal(l)
}
for k, v := range m {
l, ok := r[k]
if !ok {
continue
}
l.level = v
}
}
// SetFormatter sets the formatting function for all logs.
func SetFormatter(f Formatter) {
logger.Lock()
defer logger.Unlock()
logger.formatter = f
}
// NewPackageLogger creates a package logger object.
// This should be defined as a global var in your package, referencing your repo.
func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
logger.Lock()
defer logger.Unlock()
if logger.repoMap == nil {
logger.repoMap = make(map[string]RepoLogger)
}
r, rok := logger.repoMap[repo]
if !rok {
logger.repoMap[repo] = make(RepoLogger)
r = logger.repoMap[repo]
}
p, pok := r[pkg]
if !pok {
r[pkg] = &PackageLogger{
pkg: pkg,
level: INFO,
}
p = r[pkg]
}
return
}

@ -0,0 +1,191 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capnslog
import (
"fmt"
"os"
)
type PackageLogger struct {
pkg string
level LogLevel
}
const calldepth = 2
func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
logger.Lock()
defer logger.Unlock()
if inLevel != CRITICAL && p.level < inLevel {
return
}
if logger.formatter != nil {
logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
}
}
// SetLevel allows users to change the current logging level.
func (p *PackageLogger) SetLevel(l LogLevel) {
logger.Lock()
defer logger.Unlock()
p.level = l
}
// LevelAt checks if the given log level will be output under the current setting.
func (p *PackageLogger) LevelAt(l LogLevel) bool {
logger.Lock()
defer logger.Unlock()
return p.level >= l
}
// Log a formatted string at any level between ERROR and TRACE
func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
}
// Log a message at any level between ERROR and TRACE
func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
p.internalLog(calldepth, l, fmt.Sprint(args...))
}
// log stdlib compatibility
func (p *PackageLogger) Println(args ...interface{}) {
p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
}
func (p *PackageLogger) Printf(format string, args ...interface{}) {
p.Logf(INFO, format, args...)
}
func (p *PackageLogger) Print(args ...interface{}) {
p.internalLog(calldepth, INFO, fmt.Sprint(args...))
}
// Panic and fatal
func (p *PackageLogger) Panicf(format string, args ...interface{}) {
s := fmt.Sprintf(format, args...)
p.internalLog(calldepth, CRITICAL, s)
panic(s)
}
func (p *PackageLogger) Panic(args ...interface{}) {
s := fmt.Sprint(args...)
p.internalLog(calldepth, CRITICAL, s)
panic(s)
}
func (p *PackageLogger) Panicln(args ...interface{}) {
s := fmt.Sprintln(args...)
p.internalLog(calldepth, CRITICAL, s)
panic(s)
}
func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
p.Logf(CRITICAL, format, args...)
os.Exit(1)
}
func (p *PackageLogger) Fatal(args ...interface{}) {
s := fmt.Sprint(args...)
p.internalLog(calldepth, CRITICAL, s)
os.Exit(1)
}
func (p *PackageLogger) Fatalln(args ...interface{}) {
s := fmt.Sprintln(args...)
p.internalLog(calldepth, CRITICAL, s)
os.Exit(1)
}
// Error Functions
func (p *PackageLogger) Errorf(format string, args ...interface{}) {
p.Logf(ERROR, format, args...)
}
func (p *PackageLogger) Error(entries ...interface{}) {
p.internalLog(calldepth, ERROR, entries...)
}
// Warning Functions
func (p *PackageLogger) Warningf(format string, args ...interface{}) {
p.Logf(WARNING, format, args...)
}
func (p *PackageLogger) Warning(entries ...interface{}) {
p.internalLog(calldepth, WARNING, entries...)
}
// Notice Functions
func (p *PackageLogger) Noticef(format string, args ...interface{}) {
p.Logf(NOTICE, format, args...)
}
func (p *PackageLogger) Notice(entries ...interface{}) {
p.internalLog(calldepth, NOTICE, entries...)
}
// Info Functions
func (p *PackageLogger) Infof(format string, args ...interface{}) {
p.Logf(INFO, format, args...)
}
func (p *PackageLogger) Info(entries ...interface{}) {
p.internalLog(calldepth, INFO, entries...)
}
// Debug Functions
func (p *PackageLogger) Debugf(format string, args ...interface{}) {
if p.level < DEBUG {
return
}
p.Logf(DEBUG, format, args...)
}
func (p *PackageLogger) Debug(entries ...interface{}) {
if p.level < DEBUG {
return
}
p.internalLog(calldepth, DEBUG, entries...)
}
// Trace Functions
func (p *PackageLogger) Tracef(format string, args ...interface{}) {
if p.level < TRACE {
return
}
p.Logf(TRACE, format, args...)
}
func (p *PackageLogger) Trace(entries ...interface{}) {
if p.level < TRACE {
return
}
p.internalLog(calldepth, TRACE, entries...)
}
func (p *PackageLogger) Flush() {
logger.Lock()
defer logger.Unlock()
logger.formatter.Flush()
}
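
Not from the vendored source: a short usage sketch of the PackageLogger and RepoLogger APIs above; the repo path, package name, and level string are placeholders.

package server

import "github.com/coreos/pkg/capnslog"

// Package-level logger, as the NewPackageLogger doc comment suggests.
var plog = capnslog.NewPackageLogger("github.com/example/project", "server")

// applyLogConfig parses a "pkg=level" list (e.g. "*=INFO,server=DEBUG") and
// applies it to every package logger registered for the repo.
func applyLogConfig(conf string) error {
	repo := capnslog.MustRepoLogger("github.com/example/project")
	levels, err := repo.ParseLogLevelConfig(conf)
	if err != nil {
		return err
	}
	repo.SetLogLevel(levels)
	plog.Infof("log levels set from %q", conf)
	return nil
}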

@ -0,0 +1,65 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !windows
package capnslog
import (
"fmt"
"log/syslog"
)
func NewSyslogFormatter(w *syslog.Writer) Formatter {
return &syslogFormatter{w}
}
func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
w, err := syslog.New(syslog.LOG_DEBUG, tag)
if err != nil {
return nil, err
}
return NewSyslogFormatter(w), nil
}
type syslogFormatter struct {
w *syslog.Writer
}
func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
for _, entry := range entries {
str := fmt.Sprint(entry)
switch l {
case CRITICAL:
s.w.Crit(str)
case ERROR:
s.w.Err(str)
case WARNING:
s.w.Warning(str)
case NOTICE:
s.w.Notice(str)
case INFO:
s.w.Info(str)
case DEBUG:
s.w.Debug(str)
case TRACE:
s.w.Debug(str)
default:
panic("Unhandled loglevel")
}
}
}
func (s *syslogFormatter) Flush() {
}

@ -1,4 +1,3 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@ -176,7 +175,18 @@
END OF TERMS AND CONDITIONS
Copyright 2014-2017 Weaveworks Ltd.
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@ -2,12 +2,14 @@ package aws
import (
"context"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"golang.org/x/time/rate"
"github.com/cortexproject/cortex/pkg/chunk"
@ -182,7 +184,10 @@ func (d dynamoTableClient) CreateTable(ctx context.Context, desc chunk.TableDesc
ResourceArn: tableARN,
Tags: tags,
})
return err
if relevantError(err) {
return err
}
return nil
})
})
}
@ -247,14 +252,14 @@ func (d dynamoTableClient) DescribeTable(ctx context.Context, name string) (desc
out, err := d.DynamoDB.ListTagsOfResourceWithContext(ctx, &dynamodb.ListTagsOfResourceInput{
ResourceArn: tableARN,
})
if err != nil {
if relevantError(err) {
return err
}
desc.Tags = make(map[string]string, len(out.Tags))
for _, tag := range out.Tags {
desc.Tags[*tag.Key] = *tag.Value
}
return err
return nil
})
})
@ -264,6 +269,18 @@ func (d dynamoTableClient) DescribeTable(ctx context.Context, name string) (desc
return
}
// Filter out errors that we don't want to see
// (currently only relevant in integration tests)
func relevantError(err error) bool {
if err == nil {
return false
}
if strings.Contains(err.Error(), "Tagging is not currently supported in DynamoDB Local.") {
return false
}
return true
}
func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected chunk.TableDesc) error {
if d.autoscale != nil {
err := d.autoscale.UpdateTable(ctx, current, &expected)
@ -355,7 +372,10 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch
ResourceArn: tableARN,
Tags: chunkTagsToDynamoDB(expected.Tags),
})
return err
if relevantError(err) {
return errors.Wrap(err, "applying tags")
}
return nil
})
})
}

@ -44,6 +44,7 @@ type MetricsAutoScalingConfig struct {
URL string // URL to contact Prometheus store on
TargetQueueLen int64 // Queue length above which we will scale up capacity
ScaleUpFactor float64 // Scale up capacity by this multiple
MinThrottling float64 // Ignore throttling below this level
QueueLengthQuery string // Promql query to fetch ingester queue length
ThrottleQuery string // Promql query to fetch throttle rate per table
UsageQuery string // Promql query to fetch write capacity usage per table
@ -58,6 +59,7 @@ func (cfg *MetricsAutoScalingConfig) RegisterFlags(f *flag.FlagSet) {
f.StringVar(&cfg.URL, "metrics.url", "", "Use metrics-based autoscaling, via this query URL")
f.Int64Var(&cfg.TargetQueueLen, "metrics.target-queue-length", 100000, "Queue length above which we will scale up capacity")
f.Float64Var(&cfg.ScaleUpFactor, "metrics.scale-up-factor", 1.3, "Scale up capacity by this multiple")
f.Float64Var(&cfg.MinThrottling, "metrics.ignore-throttle-below", 1, "Ignore throttling below this level (rate per second)")
f.StringVar(&cfg.QueueLengthQuery, "metrics.queue-length-query", defaultQueueLenQuery, "query to fetch ingester queue length")
f.StringVar(&cfg.ThrottleQuery, "metrics.write-throttle-query", defaultThrottleRateQuery, "query to fetch throttle rates per table")
f.StringVar(&cfg.UsageQuery, "metrics.usage-query", defaultUsageQuery, "query to fetch write capacity usage per table")
@ -146,7 +148,7 @@ func (m *metricsData) UpdateTable(ctx context.Context, current chunk.TableDesc,
"write",
m.usageRates)
case throttleRate > 0 && m.queueLengths[2] > float64(m.cfg.TargetQueueLen)*targetMax:
// Too big queue, some throttling -> scale up
// Too big queue, some throttling -> scale up (note we don't apply MinThrottling in this case)
expected.ProvisionedWrite = scaleUp(current.Name,
current.ProvisionedWrite,
expected.WriteScale.MaxCapacity,
@ -155,7 +157,7 @@ func (m *metricsData) UpdateTable(ctx context.Context, current chunk.TableDesc,
expected.WriteScale.OutCooldown,
"metrics max queue scale-up",
"write")
case throttleRate > 0 &&
case throttleRate > m.cfg.MinThrottling &&
m.queueLengths[2] > float64(m.cfg.TargetQueueLen) &&
m.queueLengths[2] > m.queueLengths[1] && m.queueLengths[1] > m.queueLengths[0]:
// Growing queue, some throttling -> scale up
@ -363,10 +365,19 @@ func promQuery(ctx context.Context, promAPI promV1.API, query string, duration,
Step: step,
}
value, err := promAPI.QueryRange(ctx, query, queryRange)
value, wrngs, err := promAPI.QueryRange(ctx, query, queryRange)
if err != nil {
return nil, err
}
if wrngs != nil {
level.Warn(util.Logger).Log(
"query", query,
"start", queryRange.Start,
"end", queryRange.End,
"step", queryRange.Step,
"warnings", wrngs,
)
}
matrix, ok := value.(model.Matrix)
if !ok {
return nil, fmt.Errorf("Unable to convert value to matrix: %#v", value)

@ -7,6 +7,12 @@ import (
)
// Cache byte arrays by key.
//
// NB we intentionally do not return errors in this interface - caching is best
// effort by definition. We found that when these methods did return errors,
// the caller would just log them - so it's easier for the implementation to do that.
// What's more, we found partially successful Fetches were often treated as failed
// when they returned an error.
type Cache interface {
Store(ctx context.Context, key []string, buf [][]byte)
Fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missing []string)

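Purely illustrative, not vendored code: a toy in-memory type showing the error-free Store/Fetch contract described above, covering only the two methods visible in this hunk (the real interface may declare more).

package cache

import "context"

// mapCache demonstrates the contract: Store never reports failure, and Fetch
// signals misses through the missing slice instead of an error.
type mapCache struct {
	data map[string][]byte
}

func (c *mapCache) Store(_ context.Context, keys []string, bufs [][]byte) {
	for i, key := range keys {
		c.data[key] = bufs[i]
	}
}

func (c *mapCache) Fetch(_ context.Context, keys []string) (found []string, bufs [][]byte, missing []string) {
	for _, key := range keys {
		buf, ok := c.data[key]
		if !ok {
			missing = append(missing, key)
			continue
		}
		found = append(found, key)
		bufs = append(bufs, buf)
	}
	return
}
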
@ -78,7 +78,6 @@ type FifoCache struct {
// indexes into entries to identify the most recent and least recent entry.
first, last int
name string
entriesAdded prometheus.Counter
entriesAddedNew prometheus.Counter
entriesEvicted prometheus.Counter
@ -95,6 +94,7 @@ type cacheEntry struct {
}
// NewFifoCache returns a new initialised FifoCache of size.
// TODO(bwplotka): Fix metrics, get them out of globals, separate or allow prefixing.
func NewFifoCache(name string, cfg FifoCacheConfig) *FifoCache {
return &FifoCache{
size: cfg.Size,
@ -102,7 +102,7 @@ func NewFifoCache(name string, cfg FifoCacheConfig) *FifoCache {
entries: make([]cacheEntry, 0, cfg.Size),
index: make(map[string]int, cfg.Size),
name: name,
// TODO(bwplotka): There might be simple cache.Cache wrapper for those.
entriesAdded: cacheEntriesAdded.WithLabelValues(name),
entriesAddedNew: cacheEntriesAddedNew.WithLabelValues(name),
entriesEvicted: cacheEntriesEvicted.WithLabelValues(name),

@ -29,31 +29,51 @@ var (
Name: "cache_hits",
Help: "Total count of keys found in cache.",
}, []string{"name"})
valueSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "cortex",
Name: "cache_value_size_bytes",
Help: "Size of values in the cache.",
// Cached chunks are generally in the KBs, but cached index can
// get big. Histogram goes from 1KB to 4MB.
// 1024 * 4^(7-1) = 4MB
Buckets: prometheus.ExponentialBuckets(1024, 4, 7),
}, []string{"name", "method"})
)
func init() {
requestDuration.Register()
prometheus.MustRegister(fetchedKeys)
prometheus.MustRegister(hits)
prometheus.MustRegister(valueSize)
}
// Instrument returns an instrumented cache.
func Instrument(name string, cache Cache) Cache {
return &instrumentedCache{
name: name,
fetchedKeys: fetchedKeys.WithLabelValues(name),
hits: hits.WithLabelValues(name),
Cache: cache,
name: name,
Cache: cache,
fetchedKeys: fetchedKeys.WithLabelValues(name),
hits: hits.WithLabelValues(name),
storedValueSize: valueSize.WithLabelValues(name, "store"),
fetchedValueSize: valueSize.WithLabelValues(name, "fetch"),
}
}
type instrumentedCache struct {
name string
fetchedKeys, hits prometheus.Counter
name string
Cache
fetchedKeys, hits prometheus.Counter
storedValueSize, fetchedValueSize prometheus.Observer
}
func (i *instrumentedCache) Store(ctx context.Context, keys []string, bufs [][]byte) {
for j := range bufs {
i.storedValueSize.Observe(float64(len(bufs[j])))
}
method := i.name + ".store"
instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error {
sp := ot.SpanFromContext(ctx)
@ -82,6 +102,10 @@ func (i *instrumentedCache) Fetch(ctx context.Context, keys []string) ([]string,
i.fetchedKeys.Add(float64(len(keys)))
i.hits.Add(float64(len(found)))
for j := range bufs {
i.fetchedValueSize.Observe(float64(len(bufs[j])))
}
return found, bufs, missing
}

@ -65,7 +65,9 @@ type Memcached struct {
inputCh chan *work
}
// NewMemcached makes a new Memcache
// NewMemcached makes a new Memcache.
// TODO(bwplotka): Fix metrics, get them out of globals, separate or allow prefixing.
// TODO(bwplotka): Remove globals & util packages from cache package entirely (e.g util.Logger).
func NewMemcached(cfg MemcachedConfig, client MemcachedClient, name string) *Memcached {
c := &Memcached{
cfg: cfg,

@ -19,11 +19,16 @@ type MemcachedClient interface {
Set(item *memcache.Item) error
}
type serverSelector interface {
memcache.ServerSelector
SetServers(servers ...string) error
}
// memcachedClient is a memcache client that gets its server list from SRV
// records, and periodically updates that ServerList.
type memcachedClient struct {
*memcache.Client
serverList *memcache.ServerList
serverList serverSelector
hostname string
service string
@ -38,6 +43,7 @@ type MemcachedClientConfig struct {
Timeout time.Duration `yaml:"timeout,omitempty"`
MaxIdleConns int `yaml:"max_idle_conns,omitempty"`
UpdateInterval time.Duration `yaml:"update_interval,omitempty"`
ConsistentHash bool `yaml:"consistent_hash,omitempty"`
}
// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet
@ -47,19 +53,26 @@ func (cfg *MemcachedClientConfig) RegisterFlagsWithPrefix(prefix, description st
f.IntVar(&cfg.MaxIdleConns, prefix+"memcached.max-idle-conns", 16, description+"Maximum number of idle connections in pool.")
f.DurationVar(&cfg.Timeout, prefix+"memcached.timeout", 100*time.Millisecond, description+"Maximum time to wait before giving up on memcached requests.")
f.DurationVar(&cfg.UpdateInterval, prefix+"memcached.update-interval", 1*time.Minute, description+"Period with which to poll DNS for memcache servers.")
f.BoolVar(&cfg.ConsistentHash, prefix+"memcached.consistent-hash", false, description+"Use consistent hashing to distribute to memcache servers.")
}
// NewMemcachedClient creates a new MemcacheClient that gets its server list
// from SRV and updates the server list on a regular basis.
func NewMemcachedClient(cfg MemcachedClientConfig) MemcachedClient {
var servers memcache.ServerList
client := memcache.NewFromSelector(&servers)
var selector serverSelector
if cfg.ConsistentHash {
selector = &MemcachedJumpHashSelector{}
} else {
selector = &memcache.ServerList{}
}
client := memcache.NewFromSelector(selector)
client.Timeout = cfg.Timeout
client.MaxIdleConns = cfg.MaxIdleConns
newClient := &memcachedClient{
Client: client,
serverList: &servers,
serverList: selector,
hostname: cfg.Host,
service: cfg.Service,
quit: make(chan struct{}),

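Not part of the diff: a hedged sketch of enabling the new consistent-hash option from code rather than flags; the host and service values are placeholders and the other fields mirror the flag defaults registered above.

package main

import (
	"time"

	"github.com/cortexproject/cortex/pkg/chunk/cache"
)

func newConsistentMemcached() cache.MemcachedClient {
	cfg := cache.MemcachedClientConfig{
		Host:           "memcached.example.svc", // placeholder SRV/host name
		Service:        "memcache",
		Timeout:        100 * time.Millisecond,
		MaxIdleConns:   16,
		UpdateInterval: time.Minute,
		// New in this change: use the jump-hash selector instead of the
		// default memcache.ServerList.
		ConsistentHash: true,
	}
	return cache.NewMemcachedClient(cfg)
}
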
@ -0,0 +1,131 @@
package cache
import (
"net"
"strings"
"sync"
"github.com/bradfitz/gomemcache/memcache"
"github.com/cespare/xxhash"
"github.com/facette/natsort"
)
// MemcachedJumpHashSelector implements the memcache.ServerSelector
// interface. MemcachedJumpHashSelector utilizes a jump hash to
// distribute keys to servers.
//
// While adding or removing servers only requires 1/N keys to move,
// servers are treated as a stack and can only be pushed/popped.
// Therefore, MemcachedJumpHashSelector works best for servers
// with consistent DNS names where the naturally sorted order
// is predictable.
type MemcachedJumpHashSelector struct {
mu sync.RWMutex
addrs []net.Addr
}
// staticAddr caches the Network() and String() values from
// any net.Addr.
//
// Copied from github.com/bradfitz/gomemcache/selector.go.
type staticAddr struct {
network, str string
}
func newStaticAddr(a net.Addr) net.Addr {
return &staticAddr{
network: a.Network(),
str: a.String(),
}
}
func (a *staticAddr) Network() string { return a.network }
func (a *staticAddr) String() string { return a.str }
// SetServers changes a MemcachedJumpHashSelector's set of servers at
// runtime and is safe for concurrent use by multiple goroutines.
//
// Each server is given equal weight. A server is given more weight
// if it's listed multiple times.
//
// SetServers returns an error if any of the server names fail to
// resolve. No attempt is made to connect to the server. If any
// error occurs, no changes are made to the internal server list.
//
// To minimize the number of rehashes for keys when scaling the
// number of servers in subsequent calls to SetServers, servers
// are stored in natural sort order.
func (s *MemcachedJumpHashSelector) SetServers(servers ...string) error {
sortedServers := make([]string, len(servers))
copy(sortedServers, servers)
natsort.Sort(sortedServers)
naddrs := make([]net.Addr, len(sortedServers))
for i, server := range sortedServers {
if strings.Contains(server, "/") {
addr, err := net.ResolveUnixAddr("unix", server)
if err != nil {
return err
}
naddrs[i] = newStaticAddr(addr)
} else {
tcpAddr, err := net.ResolveTCPAddr("tcp", server)
if err != nil {
return err
}
naddrs[i] = newStaticAddr(tcpAddr)
}
}
s.mu.Lock()
defer s.mu.Unlock()
s.addrs = naddrs
return nil
}
// jumpHash consistently chooses a hash bucket number in the range [0, numBuckets) for the given key.
// numBuckets must be >= 1.
//
// Copied from github.com/dgryski/go-jump/blob/master/jump.go
func jumpHash(key uint64, numBuckets int) int32 {
var b int64 = -1
var j int64
for j < int64(numBuckets) {
b = j
key = key*2862933555777941757 + 1
j = int64(float64(b+1) * (float64(int64(1)<<31) / float64((key>>33)+1)))
}
return int32(b)
}
// PickServer returns the server address that a given item
// should be sharded onto.
func (s *MemcachedJumpHashSelector) PickServer(key string) (net.Addr, error) {
s.mu.RLock()
defer s.mu.RUnlock()
if len(s.addrs) == 0 {
return nil, memcache.ErrNoServers
} else if len(s.addrs) == 1 {
return s.addrs[0], nil
}
cs := xxhash.Sum64String(key)
idx := jumpHash(cs, len(s.addrs))
return s.addrs[idx], nil
}
// Each iterates over each server and calls the given function.
// If f returns a non-nil error, iteration will stop and that
// error will be returned.
func (s *MemcachedJumpHashSelector) Each(f func(net.Addr) error) error {
s.mu.RLock()
defer s.mu.RUnlock()
for _, def := range s.addrs {
if err := f(def); err != nil {
return err
}
}
return nil
}
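
Also not vendored: a small sketch of using the selector directly with gomemcache; the server addresses are placeholders.

package main

import (
	"log"

	"github.com/bradfitz/gomemcache/memcache"

	"github.com/cortexproject/cortex/pkg/chunk/cache"
)

func main() {
	selector := &cache.MemcachedJumpHashSelector{}
	// SetServers natural-sorts the list, so predictably named servers keep a
	// stable hash space as replicas are added or removed at the end.
	if err := selector.SetServers("10.0.0.1:11211", "10.0.0.2:11211"); err != nil {
		log.Fatal(err)
	}
	client := memcache.NewFromSelector(selector)
	addr, err := selector.PickServer("chunk-key")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("key maps to", addr, "client ready:", client != nil)
}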

@ -34,15 +34,6 @@ var (
Help: "Number of entries written to storage per chunk.",
Buckets: prometheus.ExponentialBuckets(1, 2, 5),
})
rowWrites = util.NewHashBucketHistogram(util.HashBucketHistogramOpts{
HistogramOpts: prometheus.HistogramOpts{
Namespace: "cortex",
Name: "chunk_store_row_writes_distribution",
Help: "Distribution of writes to individual storage rows",
Buckets: prometheus.DefBuckets,
},
HashBuckets: 1024,
})
cacheCorrupt = promauto.NewCounter(prometheus.CounterOpts{
Namespace: "cortex",
Name: "cache_corrupt_chunks_total",
@ -50,10 +41,6 @@ var (
})
)
func init() {
prometheus.MustRegister(rowWrites)
}
// StoreConfig specifies config for a ChunkStore
type StoreConfig struct {
ChunkCacheConfig cache.Config `yaml:"chunk_cache_config,omitempty"`
@ -167,7 +154,6 @@ func (c *store) calculateIndexEntries(userID string, from, through model.Time, c
key := fmt.Sprintf("%s:%s:%x", entry.TableName, entry.HashValue, entry.RangeValue)
if _, ok := seenIndexEntries[key]; !ok {
seenIndexEntries[key] = struct{}{}
rowWrites.Observe(entry.HashValue, 1)
result.Add(entry.TableName, entry.HashValue, entry.RangeValue, entry.Value)
}
}

@ -108,6 +108,7 @@ type decodeResponse struct {
// NewChunkFetcher makes a new ChunkFetcher.
func NewChunkFetcher(cfg cache.Config, cacheStubs bool, storage ObjectClient) (*Fetcher, error) {
cfg.Prefix = "chunks"
cache, err := cache.New(cfg)
if err != nil {
return nil, err
@ -150,7 +151,8 @@ func (c *Fetcher) worker() {
}
}
// FetchChunks fetchers a set of chunks from cache and store.
// FetchChunks fetches a set of chunks from cache and store. Note that the keys passed in must be
// lexicographically sorted, while the returned chunks are not in the same order as the passed in chunks.
func (c *Fetcher) FetchChunks(ctx context.Context, chunks []Chunk, keys []string) ([]Chunk, error) {
log, ctx := spanlogger.New(ctx, "ChunkStore.fetchChunks")
defer log.Span.Finish()

@ -46,7 +46,7 @@ func NewCompositeStore() CompositeStore {
// AddPeriod adds the configuration for a period of time to the CompositeStore
func (c *CompositeStore) AddPeriod(storeCfg StoreConfig, cfg PeriodConfig, index IndexClient, chunks ObjectClient, limits *validation.Overrides) error {
schema := cfg.createSchema()
schema := cfg.CreateSchema()
var store Store
var err error
switch cfg.Schema {

@ -76,6 +76,14 @@ var encodings = map[Encoding]encoding{
// Set implements flag.Value.
func (e *Encoding) Set(s string) error {
// First see if the name was given
for k, v := range encodings {
if s == v.Name {
*e = k
return nil
}
}
// Otherwise, accept a number
i, err := strconv.Atoi(s)
if err != nil {
return err

@ -36,7 +36,7 @@ func (cfg *GCSConfig) RegisterFlags(f *flag.FlagSet) {
// NewGCSObjectClient makes a new chunk.ObjectClient that writes chunks to GCS.
func NewGCSObjectClient(ctx context.Context, cfg GCSConfig, schemaCfg chunk.SchemaConfig) (chunk.ObjectClient, error) {
option, err := gcsInstrumentation(ctx)
option, err := gcsInstrumentation(ctx, storage.ScopeReadWrite)
if err != nil {
return nil, err
}

@ -50,8 +50,8 @@ func bigtableInstrumentation() ([]grpc.UnaryClientInterceptor, []grpc.StreamClie
}
}
func gcsInstrumentation(ctx context.Context) (option.ClientOption, error) {
transport, err := google_http.NewTransport(ctx, http.DefaultTransport)
func gcsInstrumentation(ctx context.Context, scope string) (option.ClientOption, error) {
transport, err := google_http.NewTransport(ctx, http.DefaultTransport, option.WithScopes(scope))
if err != nil {
return nil, err
}

@ -10,8 +10,8 @@ import (
"sync"
"time"
"github.com/etcd-io/bbolt"
"github.com/go-kit/kit/log/level"
"go.etcd.io/bbolt"
"github.com/cortexproject/cortex/pkg/chunk"
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"

@ -188,7 +188,8 @@ func (cfg *SchemaConfig) ForEachAfter(t model.Time, f func(config *PeriodConfig)
}
}
func (cfg PeriodConfig) createSchema() Schema {
// CreateSchema returns the schema defined by the PeriodConfig
func (cfg PeriodConfig) CreateSchema() Schema {
var s schema
switch cfg.Schema {
case "v1":

@ -404,7 +404,7 @@ func (c *seriesStore) PutOne(ctx context.Context, from, through model.Time, chun
chunks := []Chunk{chunk}
writeReqs, keysToCache, err := c.calculateIndexEntries(from, through, chunk)
writeReqs, keysToCache, err := c.calculateIndexEntries(ctx, from, through, chunk)
if err != nil {
return err
}
@ -430,7 +430,7 @@ func (c *seriesStore) PutOne(ctx context.Context, from, through model.Time, chun
}
// calculateIndexEntries creates a set of batched WriteRequests for all the chunks it is given.
func (c *seriesStore) calculateIndexEntries(from, through model.Time, chunk Chunk) (WriteBatch, []string, error) {
func (c *seriesStore) calculateIndexEntries(ctx context.Context, from, through model.Time, chunk Chunk) (WriteBatch, []string, error) {
seenIndexEntries := map[string]struct{}{}
entries := []IndexEntry{}
@ -443,7 +443,7 @@ func (c *seriesStore) calculateIndexEntries(from, through model.Time, chunk Chun
if err != nil {
return nil, nil, err
}
_, _, missing := c.writeDedupeCache.Fetch(context.Background(), keys)
_, _, missing := c.writeDedupeCache.Fetch(ctx, keys)
// keys and labelEntries are matched in order, but Fetch() may
// return missing keys in any order so check against all of them.
for _, missingKey := range missing {
@ -468,7 +468,6 @@ func (c *seriesStore) calculateIndexEntries(from, through model.Time, chunk Chun
key := fmt.Sprintf("%s:%s:%x", entry.TableName, entry.HashValue, entry.RangeValue)
if _, ok := seenIndexEntries[key]; !ok {
seenIndexEntries[key] = struct{}{}
rowWrites.Observe(entry.HashValue, 1)
result.Add(entry.TableName, entry.HashValue, entry.RangeValue, entry.Value)
}
}

@ -157,6 +157,9 @@ func NewTableClient(name string, cfg Config) (chunk.TableClient, error) {
case "inmemory":
return chunk.NewMockStorage(), nil
case "aws", "aws-dynamo":
if cfg.AWSStorageConfig.DynamoDB.URL == nil {
return nil, fmt.Errorf("Must set -dynamodb.url in aws mode")
}
path := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/")
if len(path) > 0 {
level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path)

@ -2,8 +2,10 @@ package chunk
import (
"context"
"errors"
"flag"
"fmt"
"math/rand"
"sort"
"strings"
"sync"
@ -98,10 +100,10 @@ func (cfg *TableManagerConfig) RegisterFlags(f *flag.FlagSet) {
func (cfg *ProvisionConfig) RegisterFlags(argPrefix string, f *flag.FlagSet) {
f.Int64Var(&cfg.ProvisionedWriteThroughput, argPrefix+".write-throughput", 3000, "DynamoDB table default write throughput.")
f.Int64Var(&cfg.ProvisionedReadThroughput, argPrefix+".read-throughput", 300, "DynamoDB table default read throughput.")
f.BoolVar(&cfg.ProvisionedThroughputOnDemandMode, argPrefix+".enable-ondemand-throughput-mode", false, "Enables on demand througput provisioning for the storage provider (if supported). Applies only to tables which are not autoscaled")
f.BoolVar(&cfg.ProvisionedThroughputOnDemandMode, argPrefix+".enable-ondemand-throughput-mode", false, "Enables on demand throughput provisioning for the storage provider (if supported). Applies only to tables which are not autoscaled")
f.Int64Var(&cfg.InactiveWriteThroughput, argPrefix+".inactive-write-throughput", 1, "DynamoDB table write throughput for inactive tables.")
f.Int64Var(&cfg.InactiveReadThroughput, argPrefix+".inactive-read-throughput", 300, "DynamoDB table read throughput for inactive tables.")
f.BoolVar(&cfg.InactiveThroughputOnDemandMode, argPrefix+".inactive-enable-ondemand-throughput-mode", false, "Enables on demand througput provisioning for the storage provider (if supported). Applies only to tables which are not autoscaled")
f.BoolVar(&cfg.InactiveThroughputOnDemandMode, argPrefix+".inactive-enable-ondemand-throughput-mode", false, "Enables on demand throughput provisioning for the storage provider (if supported). Applies only to tables which are not autoscaled")
cfg.WriteScale.RegisterFlags(argPrefix+".write-throughput.scale", f)
cfg.InactiveWriteScale.RegisterFlags(argPrefix+".inactive-write-throughput.scale", f)
@ -126,6 +128,15 @@ type TableManager struct {
// NewTableManager makes a new TableManager
func NewTableManager(cfg TableManagerConfig, schemaCfg SchemaConfig, maxChunkAge time.Duration, tableClient TableClient,
objectClient BucketClient) (*TableManager, error) {
if cfg.RetentionPeriod != 0 {
// Assume the newest config is the one to use for validation of retention
indexTablesPeriod := schemaCfg.Configs[len(schemaCfg.Configs)-1].IndexTables.Period
if indexTablesPeriod != 0 && cfg.RetentionPeriod%indexTablesPeriod != 0 {
return nil, errors.New("retention period should now be a multiple of periodic table duration")
}
}
return &TableManager{
cfg: cfg,
schemaCfg: schemaCfg,
@ -156,6 +167,9 @@ func (m *TableManager) Stop() {
func (m *TableManager) loop() {
defer m.wait.Done()
// Sleep for a bit to spread the sync load across different times if the tablemanagers are all started at once.
time.Sleep(time.Duration(rand.Int63n(int64(m.cfg.DynamoDBPollInterval))))
ticker := time.NewTicker(m.cfg.DynamoDBPollInterval)
defer ticker.Stop()

@ -184,7 +184,7 @@ func (p *Pool) removeStaleClients() {
}
}
// cleanUnhealthy loops through all ingesters and deletes any that fails a healtcheck.
// cleanUnhealthy loops through all ingesters and deletes any that fails a healthcheck.
func (p *Pool) cleanUnhealthy() {
for _, addr := range p.RegisteredAddresses() {
client, ok := p.fromCache(addr)
@ -192,7 +192,7 @@ func (p *Pool) cleanUnhealthy() {
if ok {
err := healthCheck(client, p.cfg.RemoteTimeout)
if err != nil {
level.Warn(util.Logger).Log("msg", "removing ingester failing healtcheck", "addr", addr, "reason", err)
level.Warn(util.Logger).Log("msg", "removing ingester failing healthcheck", "addr", addr, "reason", err)
p.RemoveClientFor(addr)
}
}

@ -5,6 +5,7 @@ import (
"fmt"
"io"
"strings"
"sync"
"unsafe"
"github.com/prometheus/prometheus/pkg/labels"
@ -14,6 +15,21 @@ var (
expectedTimeseries = 100
expectedLabels = 20
expectedSamplesPerSeries = 10
slicePool = sync.Pool{
New: func() interface{} {
return make([]PreallocTimeseries, 0, expectedTimeseries)
},
}
timeSeriesPool = sync.Pool{
New: func() interface{} {
return TimeSeries{
Labels: make([]LabelAdapter, 0, expectedLabels),
Samples: make([]Sample, 0, expectedSamplesPerSeries),
}
},
}
)
// PreallocConfig configures how structures will be preallocated to optimise
@ -27,26 +43,25 @@ func (PreallocConfig) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&expectedSamplesPerSeries, "ingester-client.expected-samples-per-series", expectedSamplesPerSeries, "Expected number of samples per timeseries, used for preallocations.")
}
// PreallocWriteRequest is a WriteRequest which preallocs slices on Unmarshall.
// PreallocWriteRequest is a WriteRequest which preallocs slices on Unmarshal.
type PreallocWriteRequest struct {
WriteRequest
}
// Unmarshal implements proto.Message.
func (p *PreallocWriteRequest) Unmarshal(dAtA []byte) error {
p.Timeseries = make([]PreallocTimeseries, 0, expectedTimeseries)
p.Timeseries = slicePool.Get().([]PreallocTimeseries)
return p.WriteRequest.Unmarshal(dAtA)
}
// PreallocTimeseries is a TimeSeries which preallocs slices on Unmarshall.
// PreallocTimeseries is a TimeSeries which preallocs slices on Unmarshal.
type PreallocTimeseries struct {
TimeSeries
}
// Unmarshal implements proto.Message.
func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error {
p.Labels = make([]LabelAdapter, 0, expectedLabels)
p.Samples = make([]Sample, 0, expectedSamplesPerSeries)
p.TimeSeries = timeSeriesPool.Get().(TimeSeries)
return p.TimeSeries.Unmarshal(dAtA)
}
@ -224,3 +239,18 @@ func (bs *LabelAdapter) Compare(other LabelAdapter) int {
}
return strings.Compare(bs.Value, other.Value)
}
// ReuseSlice puts the slice back into a sync.Pool for reuse.
func ReuseSlice(slice []PreallocTimeseries) {
for i := range slice {
ReuseTimeseries(slice[i].TimeSeries)
}
slicePool.Put(slice[:0])
}
// ReuseTimeseries puts the timeseries back into a sync.Pool for reuse.
func ReuseTimeseries(ts TimeSeries) {
ts.Labels = ts.Labels[:0]
ts.Samples = ts.Samples[:0]
timeSeriesPool.Put(ts)
}
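
Not in the diff: a hedged sketch of the intended pooling round trip, assuming these types live in the ingester client package (imported as client); handlePush and the wire bytes are placeholders.

// import client "github.com/cortexproject/cortex/pkg/ingester/client" (assumed path)

func handlePush(data []byte) error {
	var req client.PreallocWriteRequest
	// Unmarshal draws the Timeseries slice (and each TimeSeries) from the
	// sync.Pools above instead of allocating fresh ones.
	if err := req.Unmarshal(data); err != nil {
		return err
	}

	// ... process req.Timeseries ...

	// Hand the slices back so the next request can reuse them.
	client.ReuseSlice(req.Timeseries)
	return nil
}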

@ -2,6 +2,7 @@ package ring
import (
"context"
"sync"
"sync/atomic"
)
@ -35,7 +36,7 @@ type itemTracker struct {
// to send to that ingester.
//
// Not implemented as a method on Ring so we can test separately.
func DoBatch(ctx context.Context, r ReadRing, keys []uint32, callback func(IngesterDesc, []int) error) error {
func DoBatch(ctx context.Context, r ReadRing, keys []uint32, callback func(IngesterDesc, []int) error, cleanup func()) error {
replicationSets, err := r.BatchGet(keys, Write)
if err != nil {
return err
@ -63,13 +64,24 @@ func DoBatch(ctx context.Context, r ReadRing, keys []uint32, callback func(Inges
err: make(chan error),
}
var wg sync.WaitGroup
wg.Add(len(ingesters))
for _, i := range ingesters {
go func(i ingester) {
err := callback(i.desc, i.indexes)
tracker.record(i.itemTrackers, err)
wg.Done()
}(i)
}
// Perform cleanup at the end.
go func() {
wg.Wait()
cleanup()
}()
select {
case err := <-tracker.err:
return err

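Not from the diff: a sketch of how a caller could use the new cleanup argument, for example to return pooled timeseries once every per-ingester goroutine has finished; sendSamples and req are placeholders.

err := ring.DoBatch(ctx, r, keys,
	func(ingester ring.IngesterDesc, indexes []int) error {
		// Push the samples selected by indexes to this ingester.
		return sendSamples(ctx, ingester, indexes)
	},
	func() {
		// Runs exactly once, after every callback goroutine is done.
		client.ReuseSlice(req.Timeseries)
	})
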
@ -0,0 +1,112 @@
package kv
import (
"context"
"flag"
"fmt"
"sync"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/ring/kv/consul"
"github.com/cortexproject/cortex/pkg/ring/kv/etcd"
)
// The in-memory client returned by NewClient() is a singleton, so
// that distributors and ingesters started in the same process can
// find each other.
var inmemoryStoreInit sync.Once
var inmemoryStore Client
// Config is config for a KVStore currently used by ring and HA tracker,
// where store can be consul or inmemory.
type Config struct {
Store string `yaml:"store,omitempty"`
Consul consul.Config `yaml:"consul,omitempty"`
Etcd etcd.Config `yaml:"etcd,omitempty"`
Prefix string `yaml:"prefix,omitempty"`
Mock Client
}
// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet.
// If prefix is an empty string we will register consul flags with no prefix and the
// store flag with the prefix ring, so ring.store. For everything else we pass the prefix
// to the Consul flags.
// If prefix is not an empty string it should end with a period.
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
// We need Consul flags to not have the ring prefix to maintain compatibility.
// This needs to be fixed in the future (1.0 release maybe?) when we normalize flags.
// At the moment we have consul.<flag-name>, and ring.store, going forward it would
// be easier to have everything under ring, so ring.consul.<flag-name>
cfg.Consul.RegisterFlags(f, prefix)
cfg.Etcd.RegisterFlagsWithPrefix(f, prefix)
if prefix == "" {
prefix = "ring."
}
f.StringVar(&cfg.Prefix, prefix+"prefix", "collectors/", "The prefix for the keys in the store. Should end with a /.")
f.StringVar(&cfg.Store, prefix+"store", "consul", "Backend storage to use for the ring (consul, etcd, inmemory).")
}
// Client is a high-level client for key-value stores (such as Etcd and
// Consul) that exposes operations such as CAS and Watch which take callbacks.
// It also deals with serialisation by using a Codec and having an instance of
// the desired type passed in to methods, a la json.Unmarshal.
type Client interface {
// Get a specific key. Will use a codec to deserialise the value to the appropriate type.
Get(ctx context.Context, key string) (interface{}, error)
// CAS stands for Compare-And-Swap. Will call provided callback f with the
// current value of the key and allow callback to return a different value.
// Will then attempt to atomically swap the current value for the new value.
// If that doesn't succeed will try again - callback will be called again
// with new value etc. Guarantees that only a single concurrent CAS
// succeeds. Callback can return nil to indicate it is happy with existing
// value.
CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error
// WatchKey calls f whenever the value stored under key changes.
WatchKey(ctx context.Context, key string, f func(interface{}) bool)
// WatchPrefix calls f whenever any value stored under prefix changes.
WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool)
}
// NewClient creates a new Client (consul, etcd or inmemory) based on the config,
// encodes and decodes data for storage using the codec.
func NewClient(cfg Config, codec codec.Codec) (Client, error) {
if cfg.Mock != nil {
return cfg.Mock, nil
}
var client Client
var err error
switch cfg.Store {
case "consul":
client, err = consul.NewClient(cfg.Consul, codec)
case "etcd":
client, err = etcd.New(cfg.Etcd, codec)
case "inmemory":
// If we use the in-memory store, make sure everyone gets the same instance
// within the same process.
inmemoryStoreInit.Do(func() {
inmemoryStore = consul.NewInMemoryClient(codec)
})
client = inmemoryStore
default:
return nil, fmt.Errorf("invalid KV store type: %s", cfg.Store)
}
if err != nil {
return nil, err
}
if cfg.Prefix != "" {
client = PrefixClient(client, cfg.Prefix)
}
return metrics{client}, nil
}
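
Not vendored code: a minimal sketch of the CAS flow against the in-memory backend; the key and values are illustrative, and codec.String comes from the codec package added alongside this file.

package main

import (
	"context"

	"github.com/cortexproject/cortex/pkg/ring/kv"
	"github.com/cortexproject/cortex/pkg/ring/kv/codec"
)

func casExample(ctx context.Context) error {
	client, err := kv.NewClient(kv.Config{Store: "inmemory"}, codec.String{})
	if err != nil {
		return err
	}
	// CAS re-invokes the callback with the latest value until the swap
	// succeeds; a nil input means the key does not exist yet.
	return client.CAS(ctx, "tokens", func(in interface{}) (interface{}, bool, error) {
		if in == nil {
			return "first", true, nil
		}
		return in.(string) + ",next", true, nil
	})
}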

@ -0,0 +1,52 @@
package codec
import (
"github.com/golang/protobuf/proto"
"github.com/golang/snappy"
)
// Codec allows KV clients to serialise and deserialise values.
type Codec interface {
Decode([]byte) (interface{}, error)
Encode(interface{}) ([]byte, error)
}
// Proto is a Codec for proto/snappy
type Proto struct {
Factory func() proto.Message
}
// Decode implements Codec
func (p Proto) Decode(bytes []byte) (interface{}, error) {
out := p.Factory()
bytes, err := snappy.Decode(nil, bytes)
if err != nil {
return nil, err
}
if err := proto.Unmarshal(bytes, out); err != nil {
return nil, err
}
return out, nil
}
// Encode implements Codec
func (p Proto) Encode(msg interface{}) ([]byte, error) {
bytes, err := proto.Marshal(msg.(proto.Message))
if err != nil {
return nil, err
}
return snappy.Encode(nil, bytes), nil
}
// String is a code for strings.
type String struct{}
// Decode implements Codec.
func (String) Decode(bytes []byte) (interface{}, error) {
return string(bytes), nil
}
// Encode implements Codec.
func (String) Encode(msg interface{}) ([]byte, error) {
return []byte(msg.(string)), nil
}

@ -1,4 +1,4 @@
package ring
package consul
import (
"context"
@ -11,35 +11,36 @@ import (
"github.com/go-kit/kit/log/level"
consul "github.com/hashicorp/consul/api"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/weaveworks/common/instrument"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/util"
"github.com/weaveworks/common/httpgrpc"
"github.com/weaveworks/common/instrument"
)
const (
longPollDuration = 10 * time.Second
)
// ConsulConfig to create a ConsulClient
type ConsulConfig struct {
var (
writeOptions = &consul.WriteOptions{}
// ErrNotFound is returned by ConsulClient.Get.
ErrNotFound = fmt.Errorf("Not found")
backoffConfig = util.BackoffConfig{
MinBackoff: 1 * time.Second,
MaxBackoff: 1 * time.Minute,
}
)
// Config to create a ConsulClient
type Config struct {
Host string
Prefix string
ACLToken string
HTTPClientTimeout time.Duration
ConsistentReads bool
}
// RegisterFlags adds the flags required to config this to the given FlagSet
// If prefix is not an empty string it should end with a period.
func (cfg *ConsulConfig) RegisterFlags(f *flag.FlagSet, prefix string) {
f.StringVar(&cfg.Host, prefix+"consul.hostname", "localhost:8500", "Hostname and port of Consul.")
f.StringVar(&cfg.Prefix, prefix+"consul.prefix", "collectors/", "Prefix for keys in Consul. Should end with a /.")
f.StringVar(&cfg.ACLToken, prefix+"consul.acltoken", "", "ACL Token used to interact with Consul.")
f.DurationVar(&cfg.HTTPClientTimeout, prefix+"consul.client-timeout", 2*longPollDuration, "HTTP timeout when talking to consul")
f.BoolVar(&cfg.ConsistentReads, prefix+"consul.consistent-reads", true, "Enable consistent reads to consul.")
}
type kv interface {
CAS(p *consul.KVPair, q *consul.WriteOptions) (bool, *consul.WriteMeta, error)
Get(key string, q *consul.QueryOptions) (*consul.KVPair, *consul.QueryMeta, error)
@ -47,14 +48,24 @@ type kv interface {
Put(p *consul.KVPair, q *consul.WriteOptions) (*consul.WriteMeta, error)
}
type consulClient struct {
// Client is a KV.Client for Consul.
type Client struct {
kv
codec Codec
cfg ConsulConfig
codec codec.Codec
cfg Config
}
// NewConsulClient returns a new ConsulClient.
func NewConsulClient(cfg ConsulConfig, codec Codec) (KVClient, error) {
// RegisterFlags adds the flags required to config this to the given FlagSet
// If prefix is not an empty string it should end with a period.
func (cfg *Config) RegisterFlags(f *flag.FlagSet, prefix string) {
f.StringVar(&cfg.Host, prefix+"consul.hostname", "localhost:8500", "Hostname and port of Consul.")
f.StringVar(&cfg.ACLToken, prefix+"consul.acltoken", "", "ACL Token used to interact with Consul.")
f.DurationVar(&cfg.HTTPClientTimeout, prefix+"consul.client-timeout", 2*longPollDuration, "HTTP timeout when talking to Consul")
f.BoolVar(&cfg.ConsistentReads, prefix+"consul.consistent-reads", true, "Enable consistent reads to Consul.")
}
// NewClient returns a new Client.
func NewClient(cfg Config, codec codec.Codec) (*Client, error) {
client, err := consul.NewClient(&consul.Config{
Address: cfg.Host,
Token: cfg.ACLToken,
@ -68,33 +79,23 @@ func NewConsulClient(cfg ConsulConfig, codec Codec) (KVClient, error) {
if err != nil {
return nil, err
}
var c KVClient = &consulClient{
c := &Client{
kv: consulMetrics{client.KV()},
codec: codec,
cfg: cfg,
}
if cfg.Prefix != "" {
c = PrefixClient(c, cfg.Prefix)
}
return c, nil
}
var (
writeOptions = &consul.WriteOptions{}
// ErrNotFound is returned by ConsulClient.Get.
ErrNotFound = fmt.Errorf("Not found")
)
// CAS atomically modifies a value in a callback.
// If value doesn't exist you'll get nil as an argument to your callback.
func (c *consulClient) CAS(ctx context.Context, key string, f CASCallback) error {
func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
return instrument.CollectedRequest(ctx, "CAS loop", consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
return c.cas(ctx, key, f)
})
}
func (c *consulClient) cas(ctx context.Context, key string, f CASCallback) error {
func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
var (
index = uint64(0)
retries = 10
@ -102,6 +103,7 @@ func (c *consulClient) cas(ctx context.Context, key string, f CASCallback) error
)
for i := 0; i < retries; i++ {
options := &consul.QueryOptions{
AllowStale: !c.cfg.ConsistentReads,
RequireConsistent: c.cfg.ConsistentReads,
}
kvp, _, err := c.kv.Get(key, options.WithContext(ctx))
@ -124,9 +126,6 @@ func (c *consulClient) cas(ctx context.Context, key string, f CASCallback) error
intermediate, retry, err = f(intermediate)
if err != nil {
if !retry {
if resp, ok := httpgrpc.HTTPResponseFromError(err); ok && resp.GetCode() != 202 {
level.Error(util.Logger).Log("msg", "error CASing", "key", key, "err", err)
}
return err
}
continue
@ -161,25 +160,21 @@ func (c *consulClient) cas(ctx context.Context, key string, f CASCallback) error
return fmt.Errorf("failed to CAS %s", key)
}
var backoffConfig = util.BackoffConfig{
MinBackoff: 1 * time.Second,
MaxBackoff: 1 * time.Minute,
}
// WatchKey will watch a given key in consul for changes. When the value
// under said key changes, the f callback is called with the deserialised
// value. To construct the deserialised value, a factory function should be
// supplied which generates an empty struct for WatchKey to deserialise
// into. Values in Consul are assumed to be JSON. This function blocks until
// the context is cancelled.
func (c *consulClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
var (
backoff = util.NewBackoff(ctx, backoffConfig)
index = uint64(0)
)
for backoff.Ongoing() {
queryOptions := &consul.QueryOptions{
RequireConsistent: true,
AllowStale: !c.cfg.ConsistentReads,
RequireConsistent: c.cfg.ConsistentReads,
WaitIndex: index,
WaitTime: longPollDuration,
}
@ -213,14 +208,15 @@ func (c *consulClient) WatchKey(ctx context.Context, key string, f func(interfac
// WatchPrefix will watch a given prefix in Consul for new keys and changes to existing keys under that prefix.
// When the value under said key changes, the f callback is called with the deserialised value.
// Values in Consul are assumed to be JSON. This function blocks until the context is cancelled.
func (c *consulClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
var (
backoff = util.NewBackoff(ctx, backoffConfig)
index = uint64(0)
)
for backoff.Ongoing() {
queryOptions := &consul.QueryOptions{
RequireConsistent: true,
AllowStale: !c.cfg.ConsistentReads,
RequireConsistent: c.cfg.ConsistentReads,
WaitIndex: index,
WaitTime: longPollDuration,
}
@ -254,16 +250,10 @@ func (c *consulClient) WatchPrefix(ctx context.Context, prefix string, f func(st
}
}
func (c *consulClient) PutBytes(ctx context.Context, key string, buf []byte) error {
_, err := c.kv.Put(&consul.KVPair{
Key: key,
Value: buf,
}, writeOptions.WithContext(ctx))
return err
}
func (c *consulClient) Get(ctx context.Context, key string) (interface{}, error) {
// Get implements kv.Get.
func (c *Client) Get(ctx context.Context, key string) (interface{}, error) {
options := &consul.QueryOptions{
AllowStale: !c.cfg.ConsistentReads,
RequireConsistent: c.cfg.ConsistentReads,
}
kvp, _, err := c.kv.Get(key, options.WithContext(ctx))
@ -274,40 +264,3 @@ func (c *consulClient) Get(ctx context.Context, key string) (interface{}, error)
}
return c.codec.Decode(kvp.Value)
}
type prefixedConsulClient struct {
prefix string
consul KVClient
}
// PrefixClient takes a ConsulClient and forces a prefix on all its operations.
func PrefixClient(client KVClient, prefix string) KVClient {
return &prefixedConsulClient{prefix, client}
}
// CAS atomically modifies a value in a callback. If the value doesn't exist,
// you'll get 'nil' as an argument to your callback.
func (c *prefixedConsulClient) CAS(ctx context.Context, key string, f CASCallback) error {
return c.consul.CAS(ctx, c.prefix+key, f)
}
// WatchKey watches a key.
func (c *prefixedConsulClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
c.consul.WatchKey(ctx, c.prefix+key, f)
}
// WatchPrefix watches a prefix. For a prefix client it appends the prefix argument to the clients prefix.
func (c *prefixedConsulClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
c.consul.WatchPrefix(ctx, fmt.Sprintf("%s%s", c.prefix, prefix), func(k string, i interface{}) bool {
return f(strings.TrimPrefix(k, c.prefix), i)
})
}
// PutBytes writes bytes to Consul.
func (c *prefixedConsulClient) PutBytes(ctx context.Context, key string, buf []byte) error {
return c.consul.PutBytes(ctx, c.prefix+key, buf)
}
func (c *prefixedConsulClient) Get(ctx context.Context, key string) (interface{}, error) {
return c.consul.Get(ctx, c.prefix+key)
}
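// Illustrative sketch, not part of the vendored change: how a caller typically drives
// the CAS loop of the consul-backed Client above. The kv/consul package path and the
// "ring" key name are assumptions for the example; the in-memory mock stands in for a
// real Consul server.
package main

import (
	"context"
	"log"

	"github.com/cortexproject/cortex/pkg/ring"
	"github.com/cortexproject/cortex/pkg/ring/kv/consul"
)

func main() {
	client := consul.NewInMemoryClient(ring.GetCodec())

	err := client.CAS(context.Background(), "ring", func(in interface{}) (out interface{}, retry bool, err error) {
		desc, ok := in.(*ring.Desc)
		if !ok || desc == nil {
			// The key does not exist yet; start from an empty ring description.
			desc = ring.NewDesc()
		}
		// Mutate desc here; returning it writes it back atomically, retrying on conflict.
		return desc, true, nil
	})
	if err != nil {
		log.Fatal(err)
	}
}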

@ -1,13 +1,15 @@
package ring
package consul
import (
fmt "fmt"
"sync"
"time"
"github.com/cortexproject/cortex/pkg/util"
"github.com/go-kit/kit/log/level"
consul "github.com/hashicorp/consul/api"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/util"
)
type mockKV struct {
@ -17,14 +19,14 @@ type mockKV struct {
current uint64 // the current 'index in the log'
}
// NewInMemoryKVClient makes a new mock consul client.
func NewInMemoryKVClient(codec Codec) KVClient {
// NewInMemoryClient makes a new mock consul client.
func NewInMemoryClient(codec codec.Codec) *Client {
m := mockKV{
kvps: map[string]*consul.KVPair{},
}
m.cond = sync.NewCond(&m.mtx)
go m.loop()
return &consulClient{
return &Client{
kv: &m,
codec: codec,
}

@ -0,0 +1,196 @@
package etcd
import (
"context"
"flag"
"fmt"
"time"
"github.com/go-kit/kit/log/level"
"go.etcd.io/etcd/clientv3"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
)
// Config for a new etcd.Client.
type Config struct {
Endpoints []string `yaml:"endpoints"`
DialTimeout time.Duration `yaml:"dial_timeout"`
MaxRetries int `yaml:"max_retries"`
}
// Client implements ring.KVClient for etcd.
type Client struct {
cfg Config
codec codec.Codec
cli *clientv3.Client
}
// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet.
func (cfg *Config) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
cfg.Endpoints = []string{}
f.Var((*flagext.Strings)(&cfg.Endpoints), prefix+"etcd.endpoints", "The etcd endpoints to connect to.")
f.DurationVar(&cfg.DialTimeout, prefix+"etcd.dial-timeout", 10*time.Second, "The dial timeout for the etcd connection.")
f.IntVar(&cfg.MaxRetries, prefix+"etcd.max-retries", 10, "The maximum number of retries to do for failed ops.")
}
// New makes a new Client.
func New(cfg Config, codec codec.Codec) (*Client, error) {
cli, err := clientv3.New(clientv3.Config{
Endpoints: cfg.Endpoints,
DialTimeout: cfg.DialTimeout,
})
if err != nil {
return nil, err
}
return &Client{
cfg: cfg,
codec: codec,
cli: cli,
}, nil
}
// CAS implements kv.Client.
func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
var revision int64
var lastErr error
for i := 0; i < c.cfg.MaxRetries; i++ {
resp, err := c.cli.Get(ctx, key)
if err != nil {
level.Error(util.Logger).Log("msg", "error getting key", "key", key, "err", err)
lastErr = err
continue
}
var intermediate interface{}
if len(resp.Kvs) > 0 {
intermediate, err = c.codec.Decode(resp.Kvs[0].Value)
if err != nil {
level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err)
lastErr = err
continue
}
revision = resp.Kvs[0].Version
}
var retry bool
intermediate, retry, err = f(intermediate)
if err != nil {
if !retry {
return err
}
lastErr = err
continue
}
// Callback returning nil means it doesn't want to CAS anymore.
if intermediate == nil {
return nil
}
buf, err := c.codec.Encode(intermediate)
if err != nil {
level.Error(util.Logger).Log("msg", "error serialising value", "key", key, "err", err)
lastErr = err
continue
}
result, err := c.cli.Txn(ctx).
If(clientv3.Compare(clientv3.Version(key), "=", revision)).
Then(clientv3.OpPut(key, string(buf))).
Commit()
if err != nil {
level.Error(util.Logger).Log("msg", "error CASing", "key", key, "err", err)
lastErr = err
continue
}
// result is not Succeeded if the comparison was false, i.e. if the modify indexes did not match.
if !result.Succeeded {
level.Debug(util.Logger).Log("msg", "failed to CAS, revision and version did not match in etcd", "key", key, "revision", revision)
continue
}
return nil
}
if lastErr != nil {
return lastErr
}
return fmt.Errorf("failed to CAS %s", key)
}
// WatchKey implements kv.Client.
func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
backoff := util.NewBackoff(ctx, util.BackoffConfig{
MinBackoff: 1 * time.Second,
MaxBackoff: 1 * time.Minute,
})
for backoff.Ongoing() {
watchChan := c.cli.Watch(ctx, key)
for {
resp, ok := <-watchChan
if !ok {
break
}
backoff.Reset()
for _, event := range resp.Events {
out, err := c.codec.Decode(event.Kv.Value)
if err != nil {
level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err)
continue
}
if !f(out) {
return
}
}
}
}
}
// WatchPrefix implements kv.Client.
func (c *Client) WatchPrefix(ctx context.Context, key string, f func(string, interface{}) bool) {
backoff := util.NewBackoff(ctx, util.BackoffConfig{
MinBackoff: 1 * time.Second,
MaxBackoff: 1 * time.Minute,
})
for backoff.Ongoing() {
watchChan := c.cli.Watch(ctx, key, clientv3.WithPrefix())
for {
resp, ok := <-watchChan
if !ok {
break
}
backoff.Reset()
for _, event := range resp.Events {
out, err := c.codec.Decode(event.Kv.Value)
if err != nil {
level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err)
continue
}
if !f(string(event.Kv.Key), out) {
return
}
}
}
}
}
// Get implements kv.Client.
func (c *Client) Get(ctx context.Context, key string) (interface{}, error) {
resp, err := c.cli.Get(ctx, key)
if err != nil {
return nil, err
}
if len(resp.Kvs) != 1 {
return nil, fmt.Errorf("got %d kvs, expected 1", len(resp.Kvs))
}
return c.codec.Decode(resp.Kvs[0].Value)
}
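// Illustrative sketch, not part of the vendored change: constructing the etcd-backed
// client above and watching a key until the context is cancelled. The kv/etcd package
// path, the endpoint address and the key name are assumptions for the example.
package main

import (
	"context"
	"time"

	"github.com/cortexproject/cortex/pkg/ring"
	"github.com/cortexproject/cortex/pkg/ring/kv/etcd"
)

func main() {
	cfg := etcd.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 10 * time.Second,
		MaxRetries:  10,
	}
	client, err := etcd.New(cfg, ring.GetCodec())
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Blocks until ctx is cancelled or the callback returns false.
	client.WatchKey(ctx, "ring", func(v interface{}) bool {
		desc := v.(*ring.Desc) // decoded by the codec supplied above
		_ = desc
		return true
	})
}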

@ -0,0 +1,81 @@
package etcd
import (
"flag"
"fmt"
"io"
"io/ioutil"
"net/url"
"time"
"github.com/cortexproject/cortex/pkg/util/flagext"
"go.etcd.io/etcd/embed"
"go.etcd.io/etcd/etcdserver/api/v3client"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
)
const etcdStartTimeout = 30 * time.Second
// Mock returns a Mock Etcd client.
// Inspired by https://github.com/ligato/cn-infra/blob/master/db/keyval/etcd/mocks/embeded_etcd.go.
func Mock(codec codec.Codec) (*Client, io.Closer, error) {
dir, err := ioutil.TempDir("", "etcd")
if err != nil {
return nil, nil, err
}
cfg := embed.NewConfig()
cfg.Dir = dir
lpurl, _ := url.Parse("http://localhost:0")
lcurl, _ := url.Parse("http://localhost:0")
cfg.LPUrls = []url.URL{*lpurl}
cfg.LCUrls = []url.URL{*lcurl}
etcd, err := embed.StartEtcd(cfg)
if err != nil {
return nil, nil, err
}
select {
case <-etcd.Server.ReadyNotify():
case <-time.After(etcdStartTimeout):
etcd.Server.Stop() // trigger a shutdown
return nil, nil, fmt.Errorf("server took too long to start")
}
closer := CloserFunc(func() error {
etcd.Server.Stop()
return nil
})
var config Config
flagext.DefaultValues(&config)
client := &Client{
cfg: config,
codec: codec,
cli: v3client.New(etcd.Server),
}
return client, closer, nil
}
// CloserFunc is like http.HandlerFunc but for io.Closers.
type CloserFunc func() error
// Close implements io.Closer.
func (f CloserFunc) Close() error {
return f()
}
// NopCloser does nothing.
var NopCloser = CloserFunc(func() error {
return nil
})
// RegisterFlags adds the flags required to config this to the given FlagSet.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.RegisterFlagsWithPrefix(f, "")
}
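// Illustrative sketch, not part of the vendored change: how the embedded-etcd Mock
// above might be exercised from a test. The test name and expectations are assumptions.
package etcd_test

import (
	"context"
	"testing"

	"github.com/cortexproject/cortex/pkg/ring"
	"github.com/cortexproject/cortex/pkg/ring/kv/etcd"
)

func TestEmbeddedEtcd(t *testing.T) {
	client, closer, err := etcd.Mock(ring.GetCodec())
	if err != nil {
		t.Fatal(err)
	}
	defer closer.Close()

	// Get on a missing key returns an error ("got 0 kvs, expected 1").
	if _, err := client.Get(context.Background(), "does-not-exist"); err == nil {
		t.Fatal("expected an error for a missing key")
	}
}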

@ -0,0 +1,53 @@
package kv
import (
"context"
"github.com/prometheus/client_golang/prometheus"
"github.com/weaveworks/common/instrument"
)
var requestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "cortex",
Name: "kv_request_duration_seconds",
Help: "Time spent on consul requests.",
Buckets: prometheus.DefBuckets,
}, []string{"operation", "status_code"}))
func init() {
requestDuration.Register()
}
type metrics struct {
c Client
}
func (m metrics) Get(ctx context.Context, key string) (interface{}, error) {
var result interface{}
err := instrument.CollectedRequest(ctx, "GET", requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
var err error
result, err = m.c.Get(ctx, key)
return err
})
return result, err
}
func (m metrics) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
return instrument.CollectedRequest(ctx, "CAS", requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
return m.c.CAS(ctx, key, f)
})
}
func (m metrics) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
instrument.CollectedRequest(ctx, "WatchKey", requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
m.c.WatchKey(ctx, key, f)
return nil
})
}
func (m metrics) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
instrument.CollectedRequest(ctx, "WatchPrefix", requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
m.c.WatchPrefix(ctx, prefix, f)
return nil
})
}

@ -0,0 +1,39 @@
package kv
import (
"context"
"fmt"
"strings"
)
type prefixedKVClient struct {
prefix string
client Client
}
// PrefixClient takes a Client and forces a prefix on all its operations.
func PrefixClient(client Client, prefix string) Client {
return &prefixedKVClient{prefix, client}
}
// CAS atomically modifies a value in a callback. If the value doesn't exist,
// you'll get 'nil' as an argument to your callback.
func (c *prefixedKVClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
return c.client.CAS(ctx, c.prefix+key, f)
}
// WatchKey watches a key.
func (c *prefixedKVClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
c.client.WatchKey(ctx, c.prefix+key, f)
}
// WatchPrefix watches a prefix. For a prefixed client it appends the given prefix argument to the client's own prefix.
func (c *prefixedKVClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
c.client.WatchPrefix(ctx, fmt.Sprintf("%s%s", c.prefix, prefix), func(k string, i interface{}) bool {
return f(strings.TrimPrefix(k, c.prefix), i)
})
}
func (c *prefixedKVClient) Get(ctx context.Context, key string) (interface{}, error) {
return c.client.Get(ctx, c.prefix+key)
}
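// Illustrative sketch, not part of the vendored change: every operation on the
// prefixed client is transparently namespaced; the "collectors/" prefix and the
// "ring" key below are just example names.
func exampleScopedGet(ctx context.Context, base Client) (interface{}, error) {
	scoped := PrefixClient(base, "collectors/")
	// This reads the key "collectors/ring" on the underlying client.
	return scoped.Get(ctx, "ring")
}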

@ -1,109 +0,0 @@
package ring
import (
"context"
"flag"
"fmt"
"sync"
"github.com/golang/protobuf/proto"
"github.com/golang/snappy"
)
var inmemoryStoreInit sync.Once
var inmemoryStore KVClient
// KVClient is a high-level client for Consul, that exposes operations
// such as CAS and Watch which take callbacks. It also deals with serialisation
// by having an instance factory passed in to methods and deserialising into that.
type KVClient interface {
CAS(ctx context.Context, key string, f CASCallback) error
WatchKey(ctx context.Context, key string, f func(interface{}) bool)
WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool)
Get(ctx context.Context, key string) (interface{}, error)
PutBytes(ctx context.Context, key string, buf []byte) error
}
// KVConfig is config for a KVStore currently used by ring and HA tracker,
// where store can be consul or inmemory.
type KVConfig struct {
Store string `yaml:"store,omitempty"`
Consul ConsulConfig `yaml:"consul,omitempty"`
Mock KVClient
}
// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet.
// If prefix is an empty string we will register consul flags with no prefix and the
// store flag with the prefix ring, so ring.store. For everything else we pass the prefix
// to the Consul flags.
// If prefix is not an empty string it should end with a period.
func (cfg *KVConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
// We need Consul flags to not have the ring prefix to maintain compatibility.
// This needs to be fixed in the future (1.0 release maybe?) when we normalize flags.
// At the moment we have consul.<flag-name>, and ring.store, going forward it would
// be easier to have everything under ring, so ring.consul.<flag-name>
cfg.Consul.RegisterFlags(f, prefix)
if prefix == "" {
prefix = "ring."
}
f.StringVar(&cfg.Store, prefix+"store", "consul", "Backend storage to use for the ring (consul, inmemory).")
}
// CASCallback is the type of the callback to CAS. If err is nil, out must be non-nil.
type CASCallback func(in interface{}) (out interface{}, retry bool, err error)
// NewKVStore creates a new KVstore client (inmemory or consul) based on the config,
// encodes and decodes data for storage using the codec.
func NewKVStore(cfg KVConfig, codec Codec) (KVClient, error) {
if cfg.Mock != nil {
return cfg.Mock, nil
}
switch cfg.Store {
case "consul":
return NewConsulClient(cfg.Consul, codec)
case "inmemory":
// If we use the in-memory store, make sure everyone gets the same instance
// within the same process.
inmemoryStoreInit.Do(func() {
inmemoryStore = NewInMemoryKVClient(codec)
})
return inmemoryStore, nil
default:
return nil, fmt.Errorf("invalid KV store type: %s", cfg.Store)
}
}
// Codec allows the consul client to serialise and deserialise values.
type Codec interface {
Decode([]byte) (interface{}, error)
Encode(interface{}) ([]byte, error)
}
// ProtoCodec is a Codec for proto/snappy
type ProtoCodec struct {
Factory func() proto.Message
}
// Decode implements Codec
func (p ProtoCodec) Decode(bytes []byte) (interface{}, error) {
out := p.Factory()
bytes, err := snappy.Decode(nil, bytes)
if err != nil {
return nil, err
}
if err := proto.Unmarshal(bytes, out); err != nil {
return nil, err
}
return out, nil
}
// Encode implements Codec
func (p ProtoCodec) Encode(msg interface{}) ([]byte, error) {
bytes, err := proto.Marshal(msg.(proto.Message))
if err != nil {
return nil, err
}
return snappy.Encode(nil, bytes), nil
}

@ -13,6 +13,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/cortexproject/cortex/pkg/ring/kv"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
)
@ -47,7 +48,7 @@ type LifecyclerConfig struct {
HeartbeatPeriod time.Duration `yaml:"heartbeat_period,omitempty"`
JoinAfter time.Duration `yaml:"join_after,omitempty"`
MinReadyDuration time.Duration `yaml:"min_ready_duration,omitempty"`
ClaimOnRollout bool `yaml:"claim_on_rollout,omitempty"`
UnusedFlag bool `yaml:"claim_on_rollout,omitempty"` // DEPRECATED - left for backwards-compatibility
NormaliseTokens bool `yaml:"normalise_tokens,omitempty"`
InfNames []string `yaml:"interface_names"`
FinalSleep time.Duration `yaml:"final_sleep"`
@ -64,7 +65,7 @@ func (cfg *LifecyclerConfig) RegisterFlags(f *flag.FlagSet) {
cfg.RegisterFlagsWithPrefix("", f)
}
// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet
// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet.
func (cfg *LifecyclerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
cfg.RingConfig.RegisterFlagsWithPrefix(prefix, f)
@ -78,7 +79,7 @@ func (cfg *LifecyclerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.Flag
f.DurationVar(&cfg.HeartbeatPeriod, prefix+"heartbeat-period", 5*time.Second, "Period at which to heartbeat to consul.")
f.DurationVar(&cfg.JoinAfter, prefix+"join-after", 0*time.Second, "Period to wait for a claim from another member; will join automatically after this.")
f.DurationVar(&cfg.MinReadyDuration, prefix+"min-ready-duration", 1*time.Minute, "Minimum duration to wait before becoming ready. This is to work around race conditions with ingesters exiting and updating the ring.")
f.BoolVar(&cfg.ClaimOnRollout, prefix+"claim-on-rollout", false, "Send chunks to PENDING ingesters on exit.")
flagext.DeprecatedFlag(f, prefix+"claim-on-rollout", "DEPRECATED. This feature is no longer optional.")
f.BoolVar(&cfg.NormaliseTokens, prefix+"normalise-tokens", false, "Store tokens in a normalised fashion to reduce allocations.")
f.DurationVar(&cfg.FinalSleep, prefix+"final-sleep", 30*time.Second, "Duration to sleep for before exiting, to ensure metrics are scraped.")
@ -106,7 +107,7 @@ type FlushTransferer interface {
type Lifecycler struct {
cfg LifecyclerConfig
flushTransferer FlushTransferer
KVStore KVClient
KVStore kv.Client
// Controls the lifecycle of the ingester
quit chan struct{}
@ -144,8 +145,8 @@ func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer, name s
if port == 0 {
port = *cfg.ListenPort
}
codec := ProtoCodec{Factory: ProtoDescFactory}
store, err := NewKVStore(cfg.RingConfig.KVStore, codec)
codec := GetCodec()
store, err := kv.NewClient(cfg.RingConfig.KVStore, codec)
if err != nil {
return nil, err
}
@ -473,15 +474,13 @@ func (i *Lifecycler) changeState(ctx context.Context, state IngesterState) error
func (i *Lifecycler) processShutdown(ctx context.Context) {
flushRequired := true
if i.cfg.ClaimOnRollout {
transferStart := time.Now()
if err := i.flushTransferer.TransferOut(ctx); err != nil {
level.Error(util.Logger).Log("msg", "Failed to transfer chunks to another ingester", "err", err)
shutdownDuration.WithLabelValues("transfer", "fail", i.RingName).Observe(time.Since(transferStart).Seconds())
} else {
flushRequired = false
shutdownDuration.WithLabelValues("transfer", "success", i.RingName).Observe(time.Since(transferStart).Seconds())
}
transferStart := time.Now()
if err := i.flushTransferer.TransferOut(ctx); err != nil {
level.Error(util.Logger).Log("msg", "Failed to transfer chunks to another ingester", "err", err)
shutdownDuration.WithLabelValues("transfer", "fail", i.RingName).Observe(time.Since(transferStart).Seconds())
} else {
flushRequired = false
shutdownDuration.WithLabelValues("transfer", "success", i.RingName).Observe(time.Since(transferStart).Seconds())
}
if flushRequired {

@ -6,6 +6,8 @@ import (
"time"
"github.com/golang/protobuf/proto"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
)
// ByToken is a sortable list of TokenDescs
@ -20,6 +22,11 @@ func ProtoDescFactory() proto.Message {
return NewDesc()
}
// GetCodec returns the codec used to encode and decode data being put by ring.
func GetCodec() codec.Codec {
return codec.Proto{Factory: ProtoDescFactory}
}
// NewDesc returns an empty ring.Desc
func NewDesc() *Desc {
return &Desc{

@ -15,6 +15,8 @@ import (
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/cortexproject/cortex/pkg/ring/kv"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/util"
)
@ -56,7 +58,7 @@ var ErrEmptyRing = errors.New("empty ring")
// Config for a Ring
type Config struct {
KVStore KVConfig `yaml:"kvstore,omitempty"`
KVStore kv.Config `yaml:"kvstore,omitempty"`
HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout,omitempty"`
ReplicationFactor int `yaml:"replication_factor,omitempty"`
}
@ -78,7 +80,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
type Ring struct {
name string
cfg Config
KVClient KVClient
KVClient kv.Client
done chan struct{}
quit context.CancelFunc
@ -96,8 +98,8 @@ func New(cfg Config, name string) (*Ring, error) {
if cfg.ReplicationFactor <= 0 {
return nil, fmt.Errorf("ReplicationFactor must be greater than zero: %d", cfg.ReplicationFactor)
}
codec := ProtoCodec{Factory: ProtoDescFactory}
store, err := NewKVStore(cfg.KVStore, codec)
codec := codec.Proto{Factory: ProtoDescFactory}
store, err := kv.NewClient(cfg.KVStore, codec)
if err != nil {
return nil, err
}

@ -0,0 +1,19 @@
package flagext
import (
"strings"
)
// StringSlice is a slice of strings that implements flag.Value
type StringSlice []string
// String implements flag.Value
func (v StringSlice) String() string {
return strings.Join(v, " ")
}
// Set implements flag.Value
func (v *StringSlice) Set(s string) error {
*v = append(*v, s)
return nil
}
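// Illustrative sketch, not part of the vendored change: registering a repeatable
// command-line flag backed by the StringSlice above. The flag name is an example.
package main

import (
	"flag"
	"fmt"

	"github.com/cortexproject/cortex/pkg/util/flagext"
)

func main() {
	var endpoints flagext.StringSlice
	flag.Var(&endpoints, "endpoint", "Endpoint to connect to; may be given multiple times.")
	flag.Parse()

	// e.g. -endpoint=a:1234 -endpoint=b:1234 prints "a:1234 b:1234".
	fmt.Println(endpoints.String())
}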

@ -1,120 +0,0 @@
package util
import (
"hash/fnv"
"reflect"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/prometheus/client_golang/prometheus"
)
// HashBucketHistogramOpts are the options for making a HashBucketHistogram
type HashBucketHistogramOpts struct {
prometheus.HistogramOpts
HashBuckets int
}
// HashBucketHistogram is used to track a histogram of per-bucket rates.
//
// For instance, I want to know that 50% of rows are getting X QPS or lower
// and 99% are getting Y QPS of lower. At first glance, this would involve
// tracking write rate per row, and periodically sticking those numbers in
// a histogram. To make this fit in memory: instead of per-row, we keep
// N buckets of counters and hash the key to a bucket. Then every second
// we update a histogram with the bucket values (and zero the buckets).
//
// Note, we want this metric to be relatively independent of the number of
// hash buckets and QPS of the service - we're trying to measure how well
// load balanced the write load is. So we normalise the values in the hash
// buckets such that if all buckets are '1', then we have even load. We
// do this by multiplying the number of ops per bucket by the number of
// buckets, and dividing by the number of ops.
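// Worked example (illustrative): with 2 hash buckets, per-bucket counts of [10, 30]
// and 40 ops in the last second, the normalised observations are 10*2/40 = 0.5 and
// 30*2/40 = 1.5; a perfectly balanced load would observe 1.0 for every bucket.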
type HashBucketHistogram interface {
prometheus.Metric
prometheus.Collector
Observe(string, uint32)
Stop()
}
type hashBucketHistogram struct {
prometheus.Histogram
mtx sync.RWMutex
buckets *hashBuckets
quit chan struct{}
opts HashBucketHistogramOpts
}
type hashBuckets struct {
ops uint32
buckets []uint32
}
// NewHashBucketHistogram makes a new HashBucketHistogram
func NewHashBucketHistogram(opts HashBucketHistogramOpts) HashBucketHistogram {
result := &hashBucketHistogram{
Histogram: prometheus.NewHistogram(opts.HistogramOpts),
quit: make(chan struct{}),
opts: opts,
}
result.swapBuckets()
go result.loop()
return result
}
// Stop the background goroutine
func (h *hashBucketHistogram) Stop() {
h.quit <- struct{}{}
}
func (h *hashBucketHistogram) swapBuckets() *hashBuckets {
h.mtx.Lock()
buckets := h.buckets
h.buckets = &hashBuckets{
buckets: make([]uint32, h.opts.HashBuckets, h.opts.HashBuckets),
}
h.mtx.Unlock()
return buckets
}
func (h *hashBucketHistogram) loop() {
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
buckets := h.swapBuckets()
for _, v := range buckets.buckets {
if buckets.ops > 0 {
h.Histogram.Observe(float64(v) * float64(h.opts.HashBuckets) / float64(buckets.ops))
}
}
case <-h.quit:
return
}
}
}
// Observe implements HashBucketHistogram
func (h *hashBucketHistogram) Observe(key string, value uint32) {
h.mtx.RLock()
hash := fnv.New32()
hash.Write(bytesView(key))
i := hash.Sum32() % uint32(h.opts.HashBuckets)
atomic.AddUint32(&h.buckets.ops, 1)
atomic.AddUint32(&h.buckets.buckets[i], value)
h.mtx.RUnlock()
}
func bytesView(v string) []byte {
strHeader := (*reflect.StringHeader)(unsafe.Pointer(&v))
bytesHeader := reflect.SliceHeader{
Data: strHeader.Data,
Len: strHeader.Len,
Cap: strHeader.Len,
}
return *(*[]byte)(unsafe.Pointer(&bytesHeader))
}

@ -12,7 +12,8 @@ import (
"github.com/blang/semver"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/weaveworks/common/instrument"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
)
// WriteJSONResponse writes some JSON as a HTTP response.
@ -59,6 +60,10 @@ func CompressionTypeFor(version string) CompressionType {
func ParseProtoReader(ctx context.Context, reader io.Reader, req proto.Message, compression CompressionType) ([]byte, error) {
var body []byte
var err error
sp := opentracing.SpanFromContext(ctx)
if sp != nil {
sp.LogFields(otlog.String("event", "util.ParseProtoRequest[start reading]"))
}
switch compression {
case NoCompression:
body, err = ioutil.ReadAll(reader)
@ -66,6 +71,10 @@ func ParseProtoReader(ctx context.Context, reader io.Reader, req proto.Message,
body, err = ioutil.ReadAll(snappy.NewReader(reader))
case RawSnappy:
body, err = ioutil.ReadAll(reader)
if sp != nil {
sp.LogFields(otlog.String("event", "util.ParseProtoRequest[decompress]"),
otlog.Int("size", len(body)))
}
if err == nil {
body, err = snappy.Decode(nil, body)
}
@ -74,9 +83,20 @@ func ParseProtoReader(ctx context.Context, reader io.Reader, req proto.Message,
return nil, err
}
if err := instrument.CollectedRequest(ctx, "util.ParseProtoRequest[unmarshal]", &instrument.HistogramCollector{}, instrument.ErrorCode, func(_ context.Context) error {
return proto.Unmarshal(body, req)
}); err != nil {
if sp != nil {
sp.LogFields(otlog.String("event", "util.ParseProtoRequest[unmarshal]"),
otlog.Int("size", len(body)))
}
// We re-implement proto.Unmarshal here as it calls XXX_Unmarshal first,
// which we can't override without upsetting golint.
req.Reset()
if u, ok := req.(proto.Unmarshaler); ok {
err = u.Unmarshal(body)
} else {
err = proto.NewBuffer(body).Unmarshal(req)
}
if err != nil {
return nil, err
}

@ -2,7 +2,10 @@ package validation
import (
"flag"
"os"
"time"
"gopkg.in/yaml.v2"
)
// Limits describe all the limits for users; can be used to describe global default
@ -43,9 +46,9 @@ type Limits struct {
func (l *Limits) RegisterFlags(f *flag.FlagSet) {
f.Float64Var(&l.IngestionRate, "distributor.ingestion-rate-limit", 25000, "Per-user ingestion rate limit in samples per second.")
f.IntVar(&l.IngestionBurstSize, "distributor.ingestion-burst-size", 50000, "Per-user allowed ingestion burst size (in number of samples). Warning, very high limits will be reset every -distributor.limiter-reload-period.")
f.BoolVar(&l.AcceptHASamples, "distributor.accept-ha-samples", false, "Per-user flag to enable handling of samples with external labels for identifying replicas in an HA Prometheus setup.")
f.StringVar(&l.HAReplicaLabel, "ha-tracker.replica", "__replica__", "Prometheus label to look for in samples to identify a Proemtheus HA replica.")
f.StringVar(&l.HAClusterLabel, "ha-tracker.cluster", "cluster", "Prometheus label to look for in samples to identify a Poemtheus HA cluster.")
f.BoolVar(&l.AcceptHASamples, "distributor.ha-tracker.enable-for-all-users", false, "Flag to enable, for all users, handling of samples with external labels identifying replicas in an HA Prometheus setup.")
f.StringVar(&l.HAReplicaLabel, "distributor.ha-tracker.replica", "__replica__", "Prometheus label to look for in samples to identify a Prometheus HA replica.")
f.StringVar(&l.HAClusterLabel, "distributor.ha-tracker.cluster", "cluster", "Prometheus label to look for in samples to identify a Prometheus HA cluster.")
f.IntVar(&l.MaxLabelNameLength, "validation.max-length-label-name", 1024, "Maximum length accepted for label names")
f.IntVar(&l.MaxLabelValueLength, "validation.max-length-label-value", 2048, "Maximum length accepted for label value. This setting also applies to the metric name")
f.IntVar(&l.MaxLabelNamesPerSeries, "validation.max-label-names-per-series", 30, "Maximum number of label names per series.")
@ -73,7 +76,184 @@ func (l *Limits) UnmarshalYAML(unmarshal func(interface{}) error) error {
// We want to set c to the defaults and then overwrite it with the input.
// To make unmarshal fill the plain data struct rather than calling UnmarshalYAML
// again, we have to hide it using a type indirection. See prometheus/config.
*l = defaultLimits
// During startup we won't have default values yet, so don't overwrite the limits in that case.
if defaultLimits != nil {
*l = *defaultLimits
}
type plain Limits
return unmarshal((*plain)(l))
}
// When we load YAML from disk, we want the various per-customer limits
// to default to any values specified on the command line, not default
// command line values. This global contains those values. I (Tom) cannot
// find a nicer way I'm afraid.
var defaultLimits *Limits
// Overrides periodically fetches a set of per-user overrides, and provides convenience
// functions for fetching the correct value.
type Overrides struct {
overridesManager *OverridesManager
}
// NewOverrides makes a new Overrides.
// We store the supplied limits in a global variable to ensure per-tenant limits
// are defaulted to those values. As such, the last call to NewOverrides will
// become the new global defaults.
func NewOverrides(defaults Limits) (*Overrides, error) {
defaultLimits = &defaults
overridesManagerConfig := OverridesManagerConfig{
OverridesReloadPeriod: defaults.PerTenantOverridePeriod,
OverridesLoadPath: defaults.PerTenantOverrideConfig,
OverridesLoader: loadOverrides,
Defaults: &defaults,
}
overridesManager, err := NewOverridesManager(overridesManagerConfig)
if err != nil {
return nil, err
}
return &Overrides{
overridesManager: overridesManager,
}, nil
}
// Stop background reloading of overrides.
func (o *Overrides) Stop() {
o.overridesManager.Stop()
}
// IngestionRate returns the limit on ingester rate (samples per second).
func (o *Overrides) IngestionRate(userID string) float64 {
return o.overridesManager.GetLimits(userID).(*Limits).IngestionRate
}
// IngestionBurstSize returns the burst size for ingestion rate.
func (o *Overrides) IngestionBurstSize(userID string) int {
return o.overridesManager.GetLimits(userID).(*Limits).IngestionBurstSize
}
// AcceptHASamples returns whether the distributor should track and accept samples from HA replicas for this user.
func (o *Overrides) AcceptHASamples(userID string) bool {
return o.overridesManager.GetLimits(userID).(*Limits).AcceptHASamples
}
// HAReplicaLabel returns the replica label to look for when deciding whether to accept a sample from a Prometheus HA replica.
func (o *Overrides) HAReplicaLabel(userID string) string {
return o.overridesManager.GetLimits(userID).(*Limits).HAReplicaLabel
}
// HAClusterLabel returns the cluster label to look for when deciding whether to accept a sample from a Prometheus HA replica.
func (o *Overrides) HAClusterLabel(userID string) string {
return o.overridesManager.GetLimits(userID).(*Limits).HAClusterLabel
}
// MaxLabelNameLength returns maximum length a label name can be.
func (o *Overrides) MaxLabelNameLength(userID string) int {
return o.overridesManager.GetLimits(userID).(*Limits).MaxLabelNameLength
}
// MaxLabelValueLength returns maximum length a label value can be. This also is
// the maximum length of a metric name.
func (o *Overrides) MaxLabelValueLength(userID string) int {
return o.overridesManager.GetLimits(userID).(*Limits).MaxLabelValueLength
}
// MaxLabelNamesPerSeries returns the maximum number of label/value pairs per timeseries.
func (o *Overrides) MaxLabelNamesPerSeries(userID string) int {
return o.overridesManager.GetLimits(userID).(*Limits).MaxLabelNamesPerSeries
}
// RejectOldSamples returns true when we should reject samples older than a certain age.
func (o *Overrides) RejectOldSamples(userID string) bool {
return o.overridesManager.GetLimits(userID).(*Limits).RejectOldSamples
}
// RejectOldSamplesMaxAge returns the age at which samples should be rejected.
func (o *Overrides) RejectOldSamplesMaxAge(userID string) time.Duration {
return o.overridesManager.GetLimits(userID).(*Limits).RejectOldSamplesMaxAge
}
// CreationGracePeriod is misnamed, and actually returns how far into the future
// we should accept samples.
func (o *Overrides) CreationGracePeriod(userID string) time.Duration {
return o.overridesManager.GetLimits(userID).(*Limits).CreationGracePeriod
}
// MaxSeriesPerQuery returns the maximum number of series a query is allowed to hit.
func (o *Overrides) MaxSeriesPerQuery(userID string) int {
return o.overridesManager.GetLimits(userID).(*Limits).MaxSeriesPerQuery
}
// MaxSamplesPerQuery returns the maximum number of samples in a query (from the ingester).
func (o *Overrides) MaxSamplesPerQuery(userID string) int {
return o.overridesManager.GetLimits(userID).(*Limits).MaxSamplesPerQuery
}
// MaxSeriesPerUser returns the maximum number of series a user is allowed to store.
func (o *Overrides) MaxSeriesPerUser(userID string) int {
return o.overridesManager.GetLimits(userID).(*Limits).MaxSeriesPerUser
}
// MaxSeriesPerMetric returns the maximum number of series allowed per metric.
func (o *Overrides) MaxSeriesPerMetric(userID string) int {
return o.overridesManager.GetLimits(userID).(*Limits).MaxSeriesPerMetric
}
// MaxChunksPerQuery returns the maximum number of chunks allowed per query.
func (o *Overrides) MaxChunksPerQuery(userID string) int {
return o.overridesManager.GetLimits(userID).(*Limits).MaxChunksPerQuery
}
// MaxQueryLength returns the limit of the length (in time) of a query.
func (o *Overrides) MaxQueryLength(userID string) time.Duration {
return o.overridesManager.GetLimits(userID).(*Limits).MaxQueryLength
}
// MaxQueryParallelism returns the limit to the number of sub-queries the
// frontend will process in parallel.
func (o *Overrides) MaxQueryParallelism(userID string) int {
return o.overridesManager.GetLimits(userID).(*Limits).MaxQueryParallelism
}
// EnforceMetricName returns whether to enforce the presence of a metric name.
func (o *Overrides) EnforceMetricName(userID string) bool {
return o.overridesManager.GetLimits(userID).(*Limits).EnforceMetricName
}
// CardinalityLimit returns the cardinality limit for the user.
func (o *Overrides) CardinalityLimit(userID string) int {
return o.overridesManager.GetLimits(userID).(*Limits).CardinalityLimit
}
// loadOverrides loads the per-tenant overrides and returns the limits as a map of interface{} values for OverridesManager to store.
// We need to implement it here because OverridesManager stores the limits behind an interface and does not know the concrete Limits type to initialise.
// We could have used yamlv3.Node for this, but there is no way to enforce strict decoding due to a bug in it.
// TODO: Use yamlv3.Node to move this to OverridesManager after https://github.com/go-yaml/yaml/issues/460 is fixed
func loadOverrides(filename string) (map[string]interface{}, error) {
f, err := os.Open(filename)
if err != nil {
return nil, err
}
var overrides struct {
Overrides map[string]*Limits `yaml:"overrides"`
}
decoder := yaml.NewDecoder(f)
decoder.SetStrict(true)
if err := decoder.Decode(&overrides); err != nil {
return nil, err
}
overridesAsInterface := map[string]interface{}{}
for userID := range overrides.Overrides {
overridesAsInterface[userID] = overrides.Overrides[userID]
}
return overridesAsInterface, nil
}
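// Illustrative sketch, not part of the vendored change: typical wiring of the pieces
// above. The validation package path is assumed, and flagext.DefaultValues is assumed
// to register the flags above and apply their default values.
package main

import (
	"fmt"

	"github.com/cortexproject/cortex/pkg/util/flagext"
	"github.com/cortexproject/cortex/pkg/util/validation"
)

func main() {
	var defaults validation.Limits
	flagext.DefaultValues(&defaults) // PerTenantOverrideConfig/Period control reloading

	overrides, err := validation.NewOverrides(defaults)
	if err != nil {
		panic(err)
	}
	defer overrides.Stop()

	// Falls back to the defaults when no per-tenant override is loaded.
	fmt.Println(overrides.IngestionRate("tenant-1"))
}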

@ -1,16 +1,13 @@
package validation
import (
"os"
"sync"
"time"
"github.com/cortexproject/cortex/pkg/util"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
yaml "gopkg.in/yaml.v2"
"github.com/cortexproject/cortex/pkg/util"
)
var overridesReloadSuccess = promauto.NewGauge(prometheus.GaugeOpts{
@ -22,290 +19,88 @@ func init() {
overridesReloadSuccess.Set(1) // Default to 1
}
// When we load YAML from disk, we want the various per-customer limits
// to default to any values specified on the command line, not default
// command line values. This global contains those values. I (Tom) cannot
// find a nicer way I'm afraid.
var defaultLimits Limits
// OverridesLoader loads the overrides
type OverridesLoader func(string) (map[string]interface{}, error)
// OverridesManagerConfig holds the config for an OverridesManager instance.
// It holds config related to loading per-tenant overrides and the default limits.
type OverridesManagerConfig struct {
OverridesReloadPeriod time.Duration
OverridesLoadPath string
OverridesLoader OverridesLoader
Defaults interface{}
}
// Overrides periodically fetch a set of per-user overrides, and provides convenience
// functions for fetching the correct value.
type Overrides struct {
Defaults Limits
// OverridesManager manages the default limits and the per-user limits, i.e. overrides.
// It can periodically reload the overrides, based on the config.
type OverridesManager struct {
cfg OverridesManagerConfig
overrides map[string]interface{}
overridesMtx sync.RWMutex
overrides map[string]*Limits
quit chan struct{}
}
// NewOverrides makes a new Overrides.
// We store the supplied limits in a global variable to ensure per-tenant limits
// are defaulted to those values. As such, the last call to NewOverrides will
// become the new global defaults.
func NewOverrides(defaults Limits) (*Overrides, error) {
defaultLimits = defaults
if defaults.PerTenantOverrideConfig == "" {
level.Info(util.Logger).Log("msg", "per-tenant overides disabled")
return &Overrides{
Defaults: defaults,
overrides: map[string]*Limits{},
quit: make(chan struct{}),
}, nil
}
overrides, err := loadOverrides(defaults.PerTenantOverrideConfig)
if err != nil {
return nil, err
// NewOverridesManager creates an OverridesManager and, if an overrides load path is configured, starts the reload loop.
func NewOverridesManager(cfg OverridesManagerConfig) (*OverridesManager, error) {
overridesManager := OverridesManager{
cfg: cfg,
quit: make(chan struct{}),
}
o := &Overrides{
Defaults: defaults,
overrides: overrides,
quit: make(chan struct{}),
if cfg.OverridesLoadPath != "" {
go overridesManager.loop()
} else {
level.Info(util.Logger).Log("msg", "per-tenant overrides disabled")
}
go o.loop()
return o, nil
return &overridesManager, nil
}
func (o *Overrides) loop() {
ticker := time.NewTicker(o.Defaults.PerTenantOverridePeriod)
func (om *OverridesManager) loop() {
ticker := time.NewTicker(om.cfg.OverridesReloadPeriod)
defer ticker.Stop()
for {
select {
case <-ticker.C:
overrides, err := loadOverrides(o.Defaults.PerTenantOverrideConfig)
err := om.loadOverrides()
if err != nil {
overridesReloadSuccess.Set(0)
level.Error(util.Logger).Log("msg", "failed to reload overrides", "err", err)
continue
level.Error(util.Logger).Log("msg", "failed to load limit overrides", "err", err)
}
overridesReloadSuccess.Set(1)
o.overridesMtx.Lock()
o.overrides = overrides
o.overridesMtx.Unlock()
case <-o.quit:
case <-om.quit:
return
}
}
}
// Stop background reloading of overrides.
func (o *Overrides) Stop() {
close(o.quit)
}
func loadOverrides(filename string) (map[string]*Limits, error) {
f, err := os.Open(filename)
func (om *OverridesManager) loadOverrides() error {
overrides, err := om.cfg.OverridesLoader(om.cfg.OverridesLoadPath)
if err != nil {
return nil, err
}
var overrides struct {
Overrides map[string]*Limits `yaml:"overrides"`
}
decoder := yaml.NewDecoder(f)
decoder.SetStrict(true)
if err := decoder.Decode(&overrides); err != nil {
return nil, err
}
return overrides.Overrides, nil
}
func (o *Overrides) getBool(userID string, f func(*Limits) bool) bool {
o.overridesMtx.RLock()
defer o.overridesMtx.RUnlock()
override, ok := o.overrides[userID]
if !ok {
return f(&o.Defaults)
overridesReloadSuccess.Set(0)
return err
}
return f(override)
}
overridesReloadSuccess.Set(1)
func (o *Overrides) getInt(userID string, f func(*Limits) int) int {
o.overridesMtx.RLock()
defer o.overridesMtx.RUnlock()
override, ok := o.overrides[userID]
if !ok {
return f(&o.Defaults)
}
return f(override)
om.overridesMtx.Lock()
defer om.overridesMtx.Unlock()
om.overrides = overrides
return nil
}
func (o *Overrides) getFloat(userID string, f func(*Limits) float64) float64 {
o.overridesMtx.RLock()
defer o.overridesMtx.RUnlock()
override, ok := o.overrides[userID]
if !ok {
return f(&o.Defaults)
}
return f(override)
// Stop stops the OverridesManager
func (om *OverridesManager) Stop() {
close(om.quit)
}
func (o *Overrides) getDuration(userID string, f func(*Limits) time.Duration) time.Duration {
o.overridesMtx.RLock()
defer o.overridesMtx.RUnlock()
override, ok := o.overrides[userID]
if !ok {
return f(&o.Defaults)
}
return f(override)
}
// GetLimits returns the Limits for a specific userID if they are set, otherwise the default Limits.
func (om *OverridesManager) GetLimits(userID string) interface{} {
om.overridesMtx.RLock()
defer om.overridesMtx.RUnlock()
func (o *Overrides) getString(userID string, f func(*Limits) string) string {
o.overridesMtx.RLock()
defer o.overridesMtx.RUnlock()
override, ok := o.overrides[userID]
override, ok := om.overrides[userID]
if !ok {
return f(&o.Defaults)
return om.cfg.Defaults
}
return f(override)
}
// IngestionRate returns the limit on ingester rate (samples per second).
func (o *Overrides) IngestionRate(userID string) float64 {
return o.getFloat(userID, func(l *Limits) float64 {
return l.IngestionRate
})
}
// IngestionBurstSize returns the burst size for ingestion rate.
func (o *Overrides) IngestionBurstSize(userID string) int {
return o.getInt(userID, func(l *Limits) int {
return l.IngestionBurstSize
})
}
// AcceptHASamples returns whether the distributor should track and accept samples from HA replicas for this user.
func (o *Overrides) AcceptHASamples(userID string) bool {
return o.getBool(userID, func(l *Limits) bool {
return l.AcceptHASamples
})
}
// HAReplicaLabel returns the replica label to look for when deciding whether to accept a sample from a Prometheus HA replica.
func (o *Overrides) HAReplicaLabel(userID string) string {
return o.getString(userID, func(l *Limits) string {
return l.HAReplicaLabel
})
}
// HAClusterLabel returns the cluster label to look for when deciding whether to accept a sample from a Prometheus HA replica.
func (o *Overrides) HAClusterLabel(userID string) string {
return o.getString(userID, func(l *Limits) string {
return l.HAClusterLabel
})
}
// MaxLabelNameLength returns maximum length a label name can be.
func (o *Overrides) MaxLabelNameLength(userID string) int {
return o.getInt(userID, func(l *Limits) int {
return l.MaxLabelNameLength
})
}
// MaxLabelValueLength returns maximum length a label value can be. This also is
// the maximum length of a metric name.
func (o *Overrides) MaxLabelValueLength(userID string) int {
return o.getInt(userID, func(l *Limits) int {
return l.MaxLabelValueLength
})
}
// MaxLabelNamesPerSeries returns maximum number of label/value pairs timeseries.
func (o *Overrides) MaxLabelNamesPerSeries(userID string) int {
return o.getInt(userID, func(l *Limits) int {
return l.MaxLabelNamesPerSeries
})
}
// RejectOldSamples returns true when we should reject samples older than certain
// age.
func (o *Overrides) RejectOldSamples(userID string) bool {
return o.getBool(userID, func(l *Limits) bool {
return l.RejectOldSamples
})
}
// RejectOldSamplesMaxAge returns the age at which samples should be rejected.
func (o *Overrides) RejectOldSamplesMaxAge(userID string) time.Duration {
return o.getDuration(userID, func(l *Limits) time.Duration {
return l.RejectOldSamplesMaxAge
})
}
// CreationGracePeriod is misnamed, and actually returns how far into the future
// we should accept samples.
func (o *Overrides) CreationGracePeriod(userID string) time.Duration {
return o.getDuration(userID, func(l *Limits) time.Duration {
return l.CreationGracePeriod
})
}
// MaxSeriesPerQuery returns the maximum number of series a query is allowed to hit.
func (o *Overrides) MaxSeriesPerQuery(userID string) int {
return o.getInt(userID, func(l *Limits) int {
return l.MaxSeriesPerQuery
})
}
// MaxSamplesPerQuery returns the maximum number of samples in a query (from the ingester).
func (o *Overrides) MaxSamplesPerQuery(userID string) int {
return o.getInt(userID, func(l *Limits) int {
return l.MaxSamplesPerQuery
})
}
// MaxSeriesPerUser returns the maximum number of series a user is allowed to store.
func (o *Overrides) MaxSeriesPerUser(userID string) int {
return o.getInt(userID, func(l *Limits) int {
return l.MaxSeriesPerUser
})
}
// MaxSeriesPerMetric returns the maximum number of series allowed per metric.
func (o *Overrides) MaxSeriesPerMetric(userID string) int {
return o.getInt(userID, func(l *Limits) int {
return l.MaxSeriesPerMetric
})
}
// MaxChunksPerQuery returns the maximum number of chunks allowed per query.
func (o *Overrides) MaxChunksPerQuery(userID string) int {
return o.getInt(userID, func(l *Limits) int {
return l.MaxChunksPerQuery
})
}
// MaxQueryLength returns the limit of the length (in time) of a query.
func (o *Overrides) MaxQueryLength(userID string) time.Duration {
return o.getDuration(userID, func(l *Limits) time.Duration {
return l.MaxQueryLength
})
}
// MaxQueryParallelism returns the limit to the number of sub-queries the
// frontend will process in parallel.
func (o *Overrides) MaxQueryParallelism(userID string) int {
return o.getInt(userID, func(l *Limits) int {
return l.MaxQueryParallelism
})
}
// EnforceMetricName whether to enforce the presence of a metric name.
func (o *Overrides) EnforceMetricName(userID string) bool {
return o.getBool(userID, func(l *Limits) bool {
return l.EnforceMetricName
})
}
// CardinalityLimit whether to enforce the presence of a metric name.
func (o *Overrides) CardinalityLimit(userID string) int {
return o.getInt(userID, func(l *Limits) int {
return l.CardinalityLimit
})
return override
}
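// Illustrative sketch, not part of the vendored change: the manager is generic over the
// limits type, so it can be reused with a custom loader. The tenantLimits type, file
// path and values below are hypothetical.
type tenantLimits struct{ MaxSeries int }

func exampleManager() (*OverridesManager, error) {
	return NewOverridesManager(OverridesManagerConfig{
		OverridesReloadPeriod: 10 * time.Second,
		OverridesLoadPath:     "/etc/overrides.yaml",
		OverridesLoader: func(path string) (map[string]interface{}, error) {
			// A real loader parses the file at path; the map is keyed by tenant ID.
			return map[string]interface{}{"tenant-1": &tenantLimits{MaxSeries: 100}}, nil
		},
		Defaults: &tenantLimits{MaxSeries: 10},
	})
}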

@ -2,6 +2,7 @@ package validation
import (
"net/http"
"time"
"github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/cortexproject/cortex/pkg/util/extract"
@ -50,8 +51,15 @@ func init() {
prometheus.MustRegister(DiscardedSamples)
}
// SampleValidationConfig provides the config required to validate a sample.
type SampleValidationConfig interface {
RejectOldSamples(userID string) bool
RejectOldSamplesMaxAge(userID string) time.Duration
CreationGracePeriod(userID string) time.Duration
}
// ValidateSample returns an err if the sample is invalid.
func (cfg *Overrides) ValidateSample(userID string, metricName string, s client.Sample) error {
func ValidateSample(cfg SampleValidationConfig, userID string, metricName string, s client.Sample) error {
if cfg.RejectOldSamples(userID) && model.Time(s.TimestampMs) < model.Now().Add(-cfg.RejectOldSamplesMaxAge(userID)) {
DiscardedSamples.WithLabelValues(greaterThanMaxSampleAge, userID).Inc()
return httpgrpc.Errorf(http.StatusBadRequest, errTooOld, metricName, model.Time(s.TimestampMs))
@ -65,8 +73,16 @@ func (cfg *Overrides) ValidateSample(userID string, metricName string, s client.
return nil
}
// LabelValidationConfig provides the config required to validate labels.
type LabelValidationConfig interface {
EnforceMetricName(userID string) bool
MaxLabelNamesPerSeries(userID string) int
MaxLabelNameLength(userID string) int
MaxLabelValueLength(userID string) int
}
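// Illustrative sketch, not part of the vendored change: any type with these methods can
// now drive validation, not just Overrides. The staticLimits type and its values are
// hypothetical, e.g. handy for tests.
type staticLimits struct{}

func (staticLimits) EnforceMetricName(userID string) bool     { return true }
func (staticLimits) MaxLabelNamesPerSeries(userID string) int { return 30 }
func (staticLimits) MaxLabelNameLength(userID string) int     { return 1024 }
func (staticLimits) MaxLabelValueLength(userID string) int    { return 2048 }

var _ LabelValidationConfig = staticLimits{}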
// ValidateLabels returns an err if the labels are invalid.
func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelAdapter) error {
func ValidateLabels(cfg LabelValidationConfig, userID string, ls []client.LabelAdapter) error {
metricName, err := extract.MetricNameFromLabelAdapters(ls)
if cfg.EnforceMetricName(userID) {
if err != nil {

@ -0,0 +1,21 @@
sudo: false
language: go
go:
- 1.3.x
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- master
matrix:
allow_failures:
- go: master
fast_finish: true
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d -s .)
- go tool vet .
- go test -v -race ./...

@ -0,0 +1,21 @@
Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
<http://www.opensource.org/licenses/mit-license.php>

@ -0,0 +1,124 @@
# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
Just a few functions for helping humanize times and sizes.
`go get` it as `github.com/dustin/go-humanize`, import it as
`"github.com/dustin/go-humanize"`, use it as `humanize`.
See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
complete documentation.
## Sizes
This lets you take numbers like `82854982` and convert them to useful
strings like, `83 MB` or `79 MiB` (whichever you prefer).
Example:
```go
fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
```
## Times
This lets you take a `time.Time` and spit it out in relative terms.
For example, `12 seconds ago` or `3 days from now`.
Example:
```go
fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
```
Thanks to Kyle Lemons for the time implementation from an IRC
conversation one day. It's pretty neat.
## Ordinals
From a [mailing list discussion][odisc] where a user wanted to be able
to label ordinals.
0 -> 0th
1 -> 1st
2 -> 2nd
3 -> 3rd
4 -> 4th
[...]
Example:
```go
fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You're my 193rd best friend.
```
## Commas
Want to shove commas into numbers? Be my guest.
0 -> 0
100 -> 100
1000 -> 1,000
1000000000 -> 1,000,000,000
-100000 -> -100,000
Example:
```go
fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
```
## Ftoa
Nicer float64 formatter that removes trailing zeros.
```go
fmt.Printf("%f", 2.24) // 2.240000
fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
fmt.Printf("%f", 2.0) // 2.000000
fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
```
## SI notation
Format numbers with [SI notation][sinotation].
Example:
```go
humanize.SI(0.00000000223, "M") // 2.23 nM
```
## English-specific functions
The following functions are in the `humanize/english` subpackage.
### Plurals
Simple English pluralization
```go
english.PluralWord(1, "object", "") // object
english.PluralWord(42, "object", "") // objects
english.PluralWord(2, "bus", "") // buses
english.PluralWord(99, "locus", "loci") // loci
english.Plural(1, "object", "") // 1 object
english.Plural(42, "object", "") // 42 objects
english.Plural(2, "bus", "") // 2 buses
english.Plural(99, "locus", "loci") // 99 loci
```
### Word series
Format comma-separated word lists with conjunctions:
```go
english.WordSeries([]string{"foo"}, "and") // foo
english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
```
[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix

@ -0,0 +1,31 @@
package humanize
import (
"math/big"
)
// order of magnitude (to a max order)
func oomm(n, b *big.Int, maxmag int) (float64, int) {
mag := 0
m := &big.Int{}
for n.Cmp(b) >= 0 {
n.DivMod(n, b, m)
mag++
if mag == maxmag && maxmag >= 0 {
break
}
}
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
}
// total order of magnitude
// (same as above, but with no upper limit)
func oom(n, b *big.Int) (float64, int) {
mag := 0
m := &big.Int{}
for n.Cmp(b) >= 0 {
n.DivMod(n, b, m)
mag++
}
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
}

@ -0,0 +1,173 @@
package humanize
import (
"fmt"
"math/big"
"strings"
"unicode"
)
var (
bigIECExp = big.NewInt(1024)
// BigByte is one byte in big.Ints
BigByte = big.NewInt(1)
// BigKiByte is 1,024 bytes in big.Ints
BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
// BigMiByte is 1,024 k bytes in big.Ints
BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
// BigGiByte is 1,024 m bytes in big.Ints
BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
// BigTiByte is 1,024 g bytes in big.Ints
BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
// BigPiByte is 1,024 t bytes in big.Ints
BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
// BigEiByte is 1,024 p bytes in big.Ints
BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
// BigZiByte is 1,024 e bytes in big.Ints
BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
// BigYiByte is 1,024 z bytes in big.Ints
BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
)
var (
bigSIExp = big.NewInt(1000)
// BigSIByte is one SI byte in big.Ints
BigSIByte = big.NewInt(1)
// BigKByte is 1,000 SI bytes in big.Ints
BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
// BigMByte is 1,000 SI k bytes in big.Ints
BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
// BigGByte is 1,000 SI m bytes in big.Ints
BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
// BigTByte is 1,000 SI g bytes in big.Ints
BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
// BigPByte is 1,000 SI t bytes in big.Ints
BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
// BigEByte is 1,000 SI p bytes in big.Ints
BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
// BigZByte is 1,000 SI e bytes in big.Ints
BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
// BigYByte is 1,000 SI z bytes in big.Ints
BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
)
var bigBytesSizeTable = map[string]*big.Int{
"b": BigByte,
"kib": BigKiByte,
"kb": BigKByte,
"mib": BigMiByte,
"mb": BigMByte,
"gib": BigGiByte,
"gb": BigGByte,
"tib": BigTiByte,
"tb": BigTByte,
"pib": BigPiByte,
"pb": BigPByte,
"eib": BigEiByte,
"eb": BigEByte,
"zib": BigZiByte,
"zb": BigZByte,
"yib": BigYiByte,
"yb": BigYByte,
// Without suffix
"": BigByte,
"ki": BigKiByte,
"k": BigKByte,
"mi": BigMiByte,
"m": BigMByte,
"gi": BigGiByte,
"g": BigGByte,
"ti": BigTiByte,
"t": BigTByte,
"pi": BigPiByte,
"p": BigPByte,
"ei": BigEiByte,
"e": BigEByte,
"z": BigZByte,
"zi": BigZiByte,
"y": BigYByte,
"yi": BigYiByte,
}
var ten = big.NewInt(10)
func humanateBigBytes(s, base *big.Int, sizes []string) string {
if s.Cmp(ten) < 0 {
return fmt.Sprintf("%d B", s)
}
c := (&big.Int{}).Set(s)
val, mag := oomm(c, base, len(sizes)-1)
suffix := sizes[mag]
f := "%.0f %s"
if val < 10 {
f = "%.1f %s"
}
return fmt.Sprintf(f, val, suffix)
}
// BigBytes produces a human readable representation of an SI size.
//
// See also: ParseBigBytes.
//
// BigBytes(82854982) -> 83 MB
func BigBytes(s *big.Int) string {
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
return humanateBigBytes(s, bigSIExp, sizes)
}
// BigIBytes produces a human readable representation of an IEC size.
//
// See also: ParseBigBytes.
//
// BigIBytes(82854982) -> 79 MiB
func BigIBytes(s *big.Int) string {
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
return humanateBigBytes(s, bigIECExp, sizes)
}
// ParseBigBytes parses a string representation of bytes into the number
// of bytes it represents.
//
// See also: BigBytes, BigIBytes.
//
// ParseBigBytes("42 MB") -> 42000000, nil
// ParseBigBytes("42 mib") -> 44040192, nil
func ParseBigBytes(s string) (*big.Int, error) {
lastDigit := 0
hasComma := false
for _, r := range s {
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
break
}
if r == ',' {
hasComma = true
}
lastDigit++
}
num := s[:lastDigit]
if hasComma {
num = strings.Replace(num, ",", "", -1)
}
val := &big.Rat{}
_, err := fmt.Sscanf(num, "%f", val)
if err != nil {
return nil, err
}
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
if m, ok := bigBytesSizeTable[extra]; ok {
mv := (&big.Rat{}).SetInt(m)
val.Mul(val, mv)
rv := &big.Int{}
rv.Div(val.Num(), val.Denom())
return rv, nil
}
return nil, fmt.Errorf("unhandled size name: %v", extra)
}
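For orientation, a small usage sketch of the big-number byte helpers above, with outputs shown as comments. The `github.com/dustin/go-humanize` import path is assumed from the package name, since the vendored path is not visible in this hunk:
```go
package main

import (
	"fmt"
	"math/big"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	n := big.NewInt(82854982)
	fmt.Println(humanize.BigBytes(n))  // 83 MB  (SI, base 1000)
	fmt.Println(humanize.BigIBytes(n)) // 79 MiB (IEC, base 1024)

	// ParseBigBytes accepts either suffix family, case-insensitively.
	v, err := humanize.ParseBigBytes("42 mib")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 44040192
}
```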

@ -0,0 +1,143 @@
package humanize
import (
"fmt"
"math"
"strconv"
"strings"
"unicode"
)
// IEC Sizes.
// kibis of bits
const (
Byte = 1 << (iota * 10)
KiByte
MiByte
GiByte
TiByte
PiByte
EiByte
)
// SI Sizes.
const (
IByte = 1
KByte = IByte * 1000
MByte = KByte * 1000
GByte = MByte * 1000
TByte = GByte * 1000
PByte = TByte * 1000
EByte = PByte * 1000
)
var bytesSizeTable = map[string]uint64{
"b": Byte,
"kib": KiByte,
"kb": KByte,
"mib": MiByte,
"mb": MByte,
"gib": GiByte,
"gb": GByte,
"tib": TiByte,
"tb": TByte,
"pib": PiByte,
"pb": PByte,
"eib": EiByte,
"eb": EByte,
// Without suffix
"": Byte,
"ki": KiByte,
"k": KByte,
"mi": MiByte,
"m": MByte,
"gi": GiByte,
"g": GByte,
"ti": TiByte,
"t": TByte,
"pi": PiByte,
"p": PByte,
"ei": EiByte,
"e": EByte,
}
func logn(n, b float64) float64 {
return math.Log(n) / math.Log(b)
}
func humanateBytes(s uint64, base float64, sizes []string) string {
if s < 10 {
return fmt.Sprintf("%d B", s)
}
e := math.Floor(logn(float64(s), base))
suffix := sizes[int(e)]
val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
f := "%.0f %s"
if val < 10 {
f = "%.1f %s"
}
return fmt.Sprintf(f, val, suffix)
}
// Bytes produces a human readable representation of an SI size.
//
// See also: ParseBytes.
//
// Bytes(82854982) -> 83 MB
func Bytes(s uint64) string {
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
return humanateBytes(s, 1000, sizes)
}
// IBytes produces a human readable representation of an IEC size.
//
// See also: ParseBytes.
//
// IBytes(82854982) -> 79 MiB
func IBytes(s uint64) string {
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
return humanateBytes(s, 1024, sizes)
}
// ParseBytes parses a string representation of bytes into the number
// of bytes it represents.
//
// See Also: Bytes, IBytes.
//
// ParseBytes("42 MB") -> 42000000, nil
// ParseBytes("42 mib") -> 44040192, nil
func ParseBytes(s string) (uint64, error) {
lastDigit := 0
hasComma := false
for _, r := range s {
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
break
}
if r == ',' {
hasComma = true
}
lastDigit++
}
num := s[:lastDigit]
if hasComma {
num = strings.Replace(num, ",", "", -1)
}
f, err := strconv.ParseFloat(num, 64)
if err != nil {
return 0, err
}
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
if m, ok := bytesSizeTable[extra]; ok {
f *= float64(m)
if f >= math.MaxUint64 {
return 0, fmt.Errorf("too large: %v", s)
}
return uint64(f), nil
}
return 0, fmt.Errorf("unhandled size name: %v", extra)
}
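The uint64 variants behave the same way; a minimal sketch in the bare style of the snippet near the top of this diff (same assumed import path):
```go
humanize.Bytes(82854982)       // "83 MB"
humanize.IBytes(82854982)      // "79 MiB"
humanize.ParseBytes("1.5 GiB") // 1610612736, nil
humanize.ParseBytes("42 mb")   // 42000000, nil
```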

@ -0,0 +1,116 @@
package humanize
import (
"bytes"
"math"
"math/big"
"strconv"
"strings"
)
// Comma produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Comma(834142) -> 834,142
func Comma(v int64) string {
sign := ""
// Min int64 can't be negated to a usable value, so it has to be special cased.
if v == math.MinInt64 {
return "-9,223,372,036,854,775,808"
}
if v < 0 {
sign = "-"
v = 0 - v
}
parts := []string{"", "", "", "", "", "", ""}
j := len(parts) - 1
for v > 999 {
parts[j] = strconv.FormatInt(v%1000, 10)
switch len(parts[j]) {
case 2:
parts[j] = "0" + parts[j]
case 1:
parts[j] = "00" + parts[j]
}
v = v / 1000
j--
}
parts[j] = strconv.Itoa(int(v))
return sign + strings.Join(parts[j:], ",")
}
// Commaf produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Commaf(834142.32) -> 834,142.32
func Commaf(v float64) string {
buf := &bytes.Buffer{}
if v < 0 {
buf.Write([]byte{'-'})
v = 0 - v
}
comma := []byte{','}
parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
pos := 0
if len(parts[0])%3 != 0 {
pos += len(parts[0]) % 3
buf.WriteString(parts[0][:pos])
buf.Write(comma)
}
for ; pos < len(parts[0]); pos += 3 {
buf.WriteString(parts[0][pos : pos+3])
buf.Write(comma)
}
buf.Truncate(buf.Len() - 1)
if len(parts) > 1 {
buf.Write([]byte{'.'})
buf.WriteString(parts[1])
}
return buf.String()
}
// CommafWithDigits works like Commaf but limits the resulting
// string to the given number of decimal places.
//
// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
func CommafWithDigits(f float64, decimals int) string {
return stripTrailingDigits(Commaf(f), decimals)
}
// BigComma produces a string form of the given big.Int in base 10
// with commas after every three orders of magnitude.
func BigComma(b *big.Int) string {
sign := ""
if b.Sign() < 0 {
sign = "-"
b.Abs(b)
}
athousand := big.NewInt(1000)
c := (&big.Int{}).Set(b)
_, m := oom(c, athousand)
parts := make([]string, m+1)
j := len(parts) - 1
mod := &big.Int{}
for b.Cmp(athousand) >= 0 {
b.DivMod(b, athousand, mod)
parts[j] = strconv.FormatInt(mod.Int64(), 10)
switch len(parts[j]) {
case 2:
parts[j] = "0" + parts[j]
case 1:
parts[j] = "00" + parts[j]
}
j--
}
parts[j] = strconv.Itoa(int(b.Int64()))
return sign + strings.Join(parts[j:], ",")
}
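A short sketch of the comma-grouping helpers above. One detail worth calling out: `BigComma` reduces its argument in place, so the example passes a copy:
```go
humanize.Comma(834142)                    // "834,142"
humanize.Commaf(834142.32)                // "834,142.32"
humanize.CommafWithDigits(834142.3456, 2) // "834,142.34"

// BigComma consumes its argument, so pass a copy if the original
// value is still needed afterwards.
b := big.NewInt(834142980)
humanize.BigComma(new(big.Int).Set(b))    // "834,142,980"
```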

@ -0,0 +1,40 @@
// +build go1.6
package humanize
import (
"bytes"
"math/big"
"strings"
)
// BigCommaf produces a string form of the given big.Float in base 10
// with commas after every three orders of magnitude.
func BigCommaf(v *big.Float) string {
buf := &bytes.Buffer{}
if v.Sign() < 0 {
buf.Write([]byte{'-'})
v.Abs(v)
}
comma := []byte{','}
parts := strings.Split(v.Text('f', -1), ".")
pos := 0
if len(parts[0])%3 != 0 {
pos += len(parts[0]) % 3
buf.WriteString(parts[0][:pos])
buf.Write(comma)
}
for ; pos < len(parts[0]); pos += 3 {
buf.WriteString(parts[0][pos : pos+3])
buf.Write(comma)
}
buf.Truncate(buf.Len() - 1)
if len(parts) > 1 {
buf.Write([]byte{'.'})
buf.WriteString(parts[1])
}
return buf.String()
}
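And the `big.Float` variant, shown with a value that is exactly representable so the formatted output is stable:
```go
f := big.NewFloat(1234567.5)
humanize.BigCommaf(f) // "1,234,567.5"
```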

@ -0,0 +1,46 @@
package humanize
import (
"strconv"
"strings"
)
func stripTrailingZeros(s string) string {
offset := len(s) - 1
for offset > 0 {
if s[offset] == '.' {
offset--
break
}
if s[offset] != '0' {
break
}
offset--
}
return s[:offset+1]
}
func stripTrailingDigits(s string, digits int) string {
if i := strings.Index(s, "."); i >= 0 {
if digits <= 0 {
return s[:i]
}
i++
if i+digits >= len(s) {
return s
}
return s[:i+digits]
}
return s
}
// Ftoa converts a float to a string with no trailing zeros.
func Ftoa(num float64) string {
return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
}
// FtoaWithDigits converts a float to a string but limits the resulting string
// to the given number of decimal places, and no trailing zeros.
func FtoaWithDigits(num float64, digits int) string {
return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
}
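A quick illustration of the trailing-zero trimming (values are arbitrary; outputs follow the stripping logic above):
```go
humanize.Ftoa(2.5)                  // "2.5"
humanize.Ftoa(2.0)                  // "2"
humanize.FtoaWithDigits(2.34567, 2) // "2.34"
```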

@ -0,0 +1,8 @@
/*
Package humanize converts boring ugly numbers to human-friendly strings and back.
Durations can be turned into strings such as "3 days ago", and numbers
representing sizes like 82854982 into useful strings like "83 MB" or
"79 MiB" (whichever you prefer).
*/
package humanize

@ -0,0 +1,192 @@
package humanize
/*
Slightly adapted from the source to fit go-humanize.
Author: https://github.com/gorhill
Source: https://gist.github.com/gorhill/5285193
*/
import (
"math"
"strconv"
)
var (
renderFloatPrecisionMultipliers = [...]float64{
1,
10,
100,
1000,
10000,
100000,
1000000,
10000000,
100000000,
1000000000,
}
renderFloatPrecisionRounders = [...]float64{
0.5,
0.05,
0.005,
0.0005,
0.00005,
0.000005,
0.0000005,
0.00000005,
0.000000005,
0.0000000005,
}
)
// FormatFloat produces a formatted number as a string based on the following user-specified criteria:
// * thousands separator
// * decimal separator
// * decimal precision
//
// Usage: s := FormatFloat(format, n)
// The format parameter tells how to render the number n.
//
// See examples: http://play.golang.org/p/LXc1Ddm1lJ
//
// Examples of format strings, given n = 12345.6789:
// "#,###.##" => "12,345.67"
// "#,###." => "12,345"
// "#,###" => "12345,678"
// "#\u202F###,##" => "12345,68"
// "#.###,###### => 12.345,678900
// "" (aka default format) => 12,345.67
//
// The highest precision allowed is 9 digits after the decimal symbol.
// There is also a version for integer numbers, FormatInteger(),
// which is convenient for calls within templates.
func FormatFloat(format string, n float64) string {
// Special cases:
// NaN = "NaN"
// +Inf = "+Infinity"
// -Inf = "-Infinity"
if math.IsNaN(n) {
return "NaN"
}
if n > math.MaxFloat64 {
return "Infinity"
}
if n < -math.MaxFloat64 {
return "-Infinity"
}
// default format
precision := 2
decimalStr := "."
thousandStr := ","
positiveStr := ""
negativeStr := "-"
if len(format) > 0 {
format := []rune(format)
// If there is an explicit format directive,
// then default values are these:
precision = 9
thousandStr = ""
// collect indices of meaningful formatting directives
formatIndx := []int{}
for i, char := range format {
if char != '#' && char != '0' {
formatIndx = append(formatIndx, i)
}
}
if len(formatIndx) > 0 {
// Directive at index 0:
// Must be a '+'
// Raise an error if not the case
// index: 0123456789
// +0.000,000
// +000,000.0
// +0000.00
// +0000
if formatIndx[0] == 0 {
if format[formatIndx[0]] != '+' {
panic("RenderFloat(): invalid positive sign directive")
}
positiveStr = "+"
formatIndx = formatIndx[1:]
}
// Two directives:
// First is thousands separator
// Raise an error if not followed by 3-digit
// 0123456789
// 0.000,000
// 000,000.00
if len(formatIndx) == 2 {
if (formatIndx[1] - formatIndx[0]) != 4 {
panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
}
thousandStr = string(format[formatIndx[0]])
formatIndx = formatIndx[1:]
}
// One directive:
// Directive is decimal separator
// The number of digit-specifier following the separator indicates wanted precision
// 0123456789
// 0.00
// 000,0000
if len(formatIndx) == 1 {
decimalStr = string(format[formatIndx[0]])
precision = len(format) - formatIndx[0] - 1
}
}
}
// generate sign part
var signStr string
if n >= 0.000000001 {
signStr = positiveStr
} else if n <= -0.000000001 {
signStr = negativeStr
n = -n
} else {
signStr = ""
n = 0.0
}
// split number into integer and fractional parts
intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
// generate integer part string
intStr := strconv.FormatInt(int64(intf), 10)
// add thousand separator if required
if len(thousandStr) > 0 {
for i := len(intStr); i > 3; {
i -= 3
intStr = intStr[:i] + thousandStr + intStr[i:]
}
}
// no fractional part, we can leave now
if precision == 0 {
return signStr + intStr
}
// generate fractional part
fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
// may need padding
if len(fracStr) < precision {
fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
}
return signStr + intStr + decimalStr + fracStr
}
// FormatInteger produces a formatted number as string.
// See FormatFloat.
func FormatInteger(format string, n int) string {
return FormatFloat(format, float64(n))
}
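A sketch of how the format directives behave, using inputs chosen to avoid rounding ambiguity at the requested precision:
```go
humanize.FormatFloat("#,###.##", 1234.5678) // "1,234.57"
humanize.FormatFloat("", 1234.5678)         // "1,234.57" (default format)
humanize.FormatInteger("#,###.", 1234567)   // "1,234,567" (precision 0: integer only)
```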

@ -0,0 +1,25 @@
package humanize
import "strconv"
// Ordinal gives you the input number in a rank/ordinal format.
//
// Ordinal(3) -> 3rd
func Ordinal(x int) string {
suffix := "th"
switch x % 10 {
case 1:
if x%100 != 11 {
suffix = "st"
}
case 2:
if x%100 != 12 {
suffix = "nd"
}
case 3:
if x%100 != 13 {
suffix = "rd"
}
}
return strconv.Itoa(x) + suffix
}
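For example:
```go
humanize.Ordinal(1)   // "1st"
humanize.Ordinal(11)  // "11th"
humanize.Ordinal(22)  // "22nd"
humanize.Ordinal(113) // "113th"
```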

@ -0,0 +1,123 @@
package humanize
import (
"errors"
"math"
"regexp"
"strconv"
)
var siPrefixTable = map[float64]string{
-24: "y", // yocto
-21: "z", // zepto
-18: "a", // atto
-15: "f", // femto
-12: "p", // pico
-9: "n", // nano
-6: "µ", // micro
-3: "m", // milli
0: "",
3: "k", // kilo
6: "M", // mega
9: "G", // giga
12: "T", // tera
15: "P", // peta
18: "E", // exa
21: "Z", // zetta
24: "Y", // yotta
}
var revSIPrefixTable = revfmap(siPrefixTable)
// revfmap reverses the map and precomputes the power multiplier
func revfmap(in map[float64]string) map[string]float64 {
rv := map[string]float64{}
for k, v := range in {
rv[v] = math.Pow(10, k)
}
return rv
}
var riParseRegex *regexp.Regexp
func init() {
ri := `^([\-0-9.]+)\s?([`
for _, v := range siPrefixTable {
ri += v
}
ri += `]?)(.*)`
riParseRegex = regexp.MustCompile(ri)
}
// ComputeSI finds the most appropriate SI prefix for the given number
// and returns the prefix along with the value adjusted to be within
// that prefix.
//
// See also: SI, ParseSI.
//
// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
func ComputeSI(input float64) (float64, string) {
if input == 0 {
return 0, ""
}
mag := math.Abs(input)
exponent := math.Floor(logn(mag, 10))
exponent = math.Floor(exponent/3) * 3
value := mag / math.Pow(10, exponent)
// Handle special case where value is exactly 1000.0
// Should return 1 M instead of 1000 k
if value == 1000.0 {
exponent += 3
value = mag / math.Pow(10, exponent)
}
value = math.Copysign(value, input)
prefix := siPrefixTable[exponent]
return value, prefix
}
// SI returns a string with default formatting.
//
// SI uses Ftoa to format float value, removing trailing zeros.
//
// See also: ComputeSI, ParseSI.
//
// e.g. SI(1000000, "B") -> 1 MB
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
func SI(input float64, unit string) string {
value, prefix := ComputeSI(input)
return Ftoa(value) + " " + prefix + unit
}
// SIWithDigits works like SI but limits the resulting string to the
// given number of decimal places.
//
// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
func SIWithDigits(input float64, decimals int, unit string) string {
value, prefix := ComputeSI(input)
return FtoaWithDigits(value, decimals) + " " + prefix + unit
}
var errInvalid = errors.New("invalid input")
// ParseSI parses an SI string back into the number and unit.
//
// See also: SI, ComputeSI.
//
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
func ParseSI(input string) (float64, string, error) {
found := riParseRegex.FindStringSubmatch(input)
if len(found) != 4 {
return 0, "", errInvalid
}
mag := revSIPrefixTable[found[2]]
unit := found[3]
base, err := strconv.ParseFloat(found[1], 64)
return base * mag, unit, err
}
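A usage sketch of the SI helpers; the string outputs follow the doc comments above, and the float returned by ParseSI is approximate by nature:
```go
humanize.SI(1000000, "B")                 // "1 MB"
humanize.SI(2.2345e-12, "F")              // "2.2345 pF"
humanize.SIWithDigits(2.2345e-12, 2, "F") // "2.23 pF"
humanize.ParseSI("2.2345 pF")             // ~2.2345e-12, "F", nil
```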

@ -0,0 +1,117 @@
package humanize
import (
"fmt"
"math"
"sort"
"time"
)
// Seconds-based time units
const (
Day = 24 * time.Hour
Week = 7 * Day
Month = 30 * Day
Year = 12 * Month
LongTime = 37 * Year
)
// Time formats a time into a relative string.
//
// Time(someT) -> "3 weeks ago"
func Time(then time.Time) string {
return RelTime(then, time.Now(), "ago", "from now")
}
// A RelTimeMagnitude struct contains a relative time point at which
// the relative format of time will switch to a new format string. A
// slice of these in ascending order by their "D" field is passed to
// CustomRelTime to format durations.
//
// The Format field is a string that may contain a "%s" which will be
// replaced with the appropriate signed label (e.g. "ago" or "from
// now") and a "%d" that will be replaced by the quantity.
//
// The DivBy field is the amount of time the time difference must be
// divided by in order to display correctly.
//
// e.g. if D is 2*time.Minute and you want to display "%d minutes %s",
// DivBy should be time.Minute so that the duration is expressed in
// minutes.
type RelTimeMagnitude struct {
D time.Duration
Format string
DivBy time.Duration
}
var defaultMagnitudes = []RelTimeMagnitude{
{time.Second, "now", time.Second},
{2 * time.Second, "1 second %s", 1},
{time.Minute, "%d seconds %s", time.Second},
{2 * time.Minute, "1 minute %s", 1},
{time.Hour, "%d minutes %s", time.Minute},
{2 * time.Hour, "1 hour %s", 1},
{Day, "%d hours %s", time.Hour},
{2 * Day, "1 day %s", 1},
{Week, "%d days %s", Day},
{2 * Week, "1 week %s", 1},
{Month, "%d weeks %s", Week},
{2 * Month, "1 month %s", 1},
{Year, "%d months %s", Month},
{18 * Month, "1 year %s", 1},
{2 * Year, "2 years %s", 1},
{LongTime, "%d years %s", Year},
{math.MaxInt64, "a long while %s", 1},
}
// RelTime formats a time into a relative string.
//
// It takes two times and two labels. In addition to the generic time
// delta string (e.g. 5 minutes), the labels are applied so that the
// label corresponding to the smaller time is used.
//
// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
func RelTime(a, b time.Time, albl, blbl string) string {
return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
}
// CustomRelTime formats a time into a relative string.
//
// It takes two times, two labels, and a table of relative time formats.
// In addition to the generic time delta string (e.g. 5 minutes), the
// labels are applied so that the label corresponding to the smaller
// time is used.
func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
lbl := albl
diff := b.Sub(a)
if a.After(b) {
lbl = blbl
diff = a.Sub(b)
}
n := sort.Search(len(magnitudes), func(i int) bool {
return magnitudes[i].D > diff
})
if n >= len(magnitudes) {
n = len(magnitudes) - 1
}
mag := magnitudes[n]
args := []interface{}{}
escaped := false
for _, ch := range mag.Format {
if escaped {
switch ch {
case 's':
args = append(args, lbl)
case 'd':
args = append(args, diff/mag.DivBy)
}
escaped = false
} else {
escaped = ch == '%'
}
}
return fmt.Sprintf(mag.Format, args...)
}
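A sketch of the relative-time helpers, including a hypothetical custom magnitude table passed to CustomRelTime (table contents are illustrative, not from this diff):
```go
package main

import (
	"fmt"
	"math"
	"time"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	now := time.Now()
	fmt.Println(humanize.Time(now.Add(-3 * 24 * time.Hour)))                        // 3 days ago
	fmt.Println(humanize.RelTime(now.Add(-90*time.Minute), now, "earlier", "later")) // 1 hour earlier

	// A custom magnitude table; entries must be sorted by D, ascending.
	magnitudes := []humanize.RelTimeMagnitude{
		{D: time.Minute, Format: "moments %s", DivBy: time.Second},
		{D: time.Hour, Format: "%d minutes %s", DivBy: time.Minute},
		{D: math.MaxInt64, Format: "a while %s", DivBy: 1},
	}
	fmt.Println(humanize.CustomRelTime(now.Add(-5*time.Minute), now, "ago", "from now", magnitudes)) // 5 minutes ago
}
```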

@ -1,30 +0,0 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
default: build
race:
@go test -v -race -test.run="TestSimulate_(100op|1000op)"
fmt:
!(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
# go get honnef.co/go/tools/simple
gosimple:
gosimple ./...
# go get honnef.co/go/tools/unused
unused:
unused ./...
# go get github.com/kisielk/errcheck
errcheck:
@errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
test:
go test -timeout 20m -v -coverprofile cover.out -covermode atomic
# Note: gets "program not an importable package" in out of path builds
go test -v ./cmd/bbolt
.PHONY: race fmt errcheck test gosimple unused

@ -0,0 +1,29 @@
Copyright (c) 2015, Vincent Batoufflet and Marc Falzon
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the authors nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

@ -0,0 +1,104 @@
# natsort: natural strings sorting in Go
This is an implementation of the "Alphanum Algorithm" by [Dave Koelle][0] in Go.
[![GoDoc](https://godoc.org/facette.io/natsort?status.svg)](https://godoc.org/facette.io/natsort)
## Usage
```go
package main
import (
"fmt"
"strings"
"facette.io/natsort"
)
func main() {
list := []string{
"1000X Radonius Maximus",
"10X Radonius",
"200X Radonius",
"20X Radonius",
"20X Radonius Prime",
"30X Radonius",
"40X Radonius",
"Allegia 50 Clasteron",
"Allegia 500 Clasteron",
"Allegia 50B Clasteron",
"Allegia 51 Clasteron",
"Allegia 6R Clasteron",
"Alpha 100",
"Alpha 2",
"Alpha 200",
"Alpha 2A",
"Alpha 2A-8000",
"Alpha 2A-900",
"Callisto Morphamax",
"Callisto Morphamax 500",
"Callisto Morphamax 5000",
"Callisto Morphamax 600",
"Callisto Morphamax 6000 SE",
"Callisto Morphamax 6000 SE2",
"Callisto Morphamax 700",
"Callisto Morphamax 7000",
"Xiph Xlater 10000",
"Xiph Xlater 2000",
"Xiph Xlater 300",
"Xiph Xlater 40",
"Xiph Xlater 5",
"Xiph Xlater 50",
"Xiph Xlater 500",
"Xiph Xlater 5000",
"Xiph Xlater 58",
}
natsort.Sort(list)
fmt.Println(strings.Join(list, "\n"))
}
```
Output:
```
10X Radonius
20X Radonius
20X Radonius Prime
30X Radonius
40X Radonius
200X Radonius
1000X Radonius Maximus
Allegia 6R Clasteron
Allegia 50 Clasteron
Allegia 50B Clasteron
Allegia 51 Clasteron
Allegia 500 Clasteron
Alpha 2
Alpha 2A
Alpha 2A-900
Alpha 2A-8000
Alpha 100
Alpha 200
Callisto Morphamax
Callisto Morphamax 500
Callisto Morphamax 600
Callisto Morphamax 700
Callisto Morphamax 5000
Callisto Morphamax 6000 SE
Callisto Morphamax 6000 SE2
Callisto Morphamax 7000
Xiph Xlater 5
Xiph Xlater 40
Xiph Xlater 50
Xiph Xlater 58
Xiph Xlater 300
Xiph Xlater 500
Xiph Xlater 2000
Xiph Xlater 5000
Xiph Xlater 10000
```
[0]: http://davekoelle.com/alphanum.html

@ -0,0 +1,85 @@
// Package natsort implements natural strings sorting
package natsort
import (
"regexp"
"sort"
"strconv"
)
type stringSlice []string
func (s stringSlice) Len() int {
return len(s)
}
func (s stringSlice) Less(a, b int) bool {
return Compare(s[a], s[b])
}
func (s stringSlice) Swap(a, b int) {
s[a], s[b] = s[b], s[a]
}
var chunkifyRegexp = regexp.MustCompile(`(\d+|\D+)`)
func chunkify(s string) []string {
return chunkifyRegexp.FindAllString(s, -1)
}
// Sort sorts a list of strings in a natural order
func Sort(l []string) {
sort.Sort(stringSlice(l))
}
// Compare returns true if the first string precedes the second one according to natural order
func Compare(a, b string) bool {
chunksA := chunkify(a)
chunksB := chunkify(b)
nChunksA := len(chunksA)
nChunksB := len(chunksB)
for i := range chunksA {
if i >= nChunksB {
return false
}
aInt, aErr := strconv.Atoi(chunksA[i])
bInt, bErr := strconv.Atoi(chunksB[i])
// If both chunks are numeric, compare them as integers
if aErr == nil && bErr == nil {
if aInt == bInt {
if i == nChunksA-1 {
// We reached the last chunk of A, thus B is greater than A
return true
} else if i == nChunksB-1 {
// We reached the last chunk of B, thus A is greater than B
return false
}
continue
}
return aInt < bInt
}
// So far both strings are equal, continue to next chunk
if chunksA[i] == chunksB[i] {
if i == nChunksA-1 {
// We reached the last chunk of A, thus B is greater than A
return true
} else if i == nChunksB-1 {
// We reached the last chunk of B, thus A is greater than B
return false
}
continue
}
return chunksA[i] < chunksB[i]
}
return false
}
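A small illustration of the chunk-wise comparison, contrasting it with plain lexicographic order (import path per the README above):
```go
natsort.Compare("Alpha 2", "Alpha 100") // true: 2 < 100 numerically
natsort.Compare("Alpha 100", "Alpha 2") // false
// Plain string comparison would order these the other way
// ("Alpha 100" < "Alpha 2").
```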

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -0,0 +1,117 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2017 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package remap handles tracking the locations of Go tokens in a source text
across a rewrite by the Go formatter.
*/
package remap
import (
"fmt"
"go/scanner"
"go/token"
)
// A Location represents a span of byte offsets in the source text.
type Location struct {
Pos, End int // End is exclusive
}
// A Map represents a mapping between token locations in an input source text
// and locations in the corresponding output text.
type Map map[Location]Location
// Find reports whether the specified span is recorded by m, and if so returns
// the new location it was mapped to. If the input span was not found, the
// returned location is the same as the input.
func (m Map) Find(pos, end int) (Location, bool) {
key := Location{
Pos: pos,
End: end,
}
if loc, ok := m[key]; ok {
return loc, true
}
return key, false
}
func (m Map) add(opos, oend, npos, nend int) {
m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
}
// Compute constructs a location mapping from input to output. An error is
// reported if any of the tokens of output cannot be mapped.
func Compute(input, output []byte) (Map, error) {
itok := tokenize(input)
otok := tokenize(output)
if len(itok) != len(otok) {
return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
}
m := make(Map)
for i, ti := range itok {
to := otok[i]
if ti.Token != to.Token {
return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
}
m.add(ti.pos, ti.end, to.pos, to.end)
}
return m, nil
}
// tokinfo records the span and type of a source token.
type tokinfo struct {
pos, end int
token.Token
}
func tokenize(src []byte) []tokinfo {
fs := token.NewFileSet()
var s scanner.Scanner
s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments)
var info []tokinfo
for {
pos, next, lit := s.Scan()
switch next {
case token.SEMICOLON:
continue
}
info = append(info, tokinfo{
pos: int(pos - 1),
end: int(pos + token.Pos(len(lit)) - 1),
Token: next,
})
if next == token.EOF {
break
}
}
return info
}
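The remap package sits under an internal path of protoc-gen-go (upstream it lives at protoc-gen-go/generator/internal/remap), so it is not importable from outside that module; purely as an illustration of Compute and Find, assuming it were:
```go
package main

import (
	"fmt"

	// Internal upstream path; shown only for illustration.
	"github.com/golang/protobuf/protoc-gen-go/generator/internal/remap"
)

func main() {
	input := []byte("package p\nvar x=1\n")
	output := []byte("package p\n\nvar x = 1\n")

	m, err := remap.Compute(input, output)
	if err != nil {
		panic(err)
	}

	// The identifier x occupies bytes 14..15 of the input; Find reports
	// where the same token landed in the reformatted output.
	loc, ok := m.Find(14, 15)
	fmt.Println(loc.Pos, loc.End, ok) // 15 16 true
}
```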

@ -0,0 +1,369 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/compiler/plugin.proto
/*
Package plugin_go is a generated protocol buffer package.
It is generated from these files:
google/protobuf/compiler/plugin.proto
It has these top-level messages:
Version
CodeGeneratorRequest
CodeGeneratorResponse
*/
package plugin_go
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The version number of protocol compiler.
type Version struct {
Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
// be empty for mainline stable releases.
Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Version) Reset() { *m = Version{} }
func (m *Version) String() string { return proto.CompactTextString(m) }
func (*Version) ProtoMessage() {}
func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Version) Unmarshal(b []byte) error {
return xxx_messageInfo_Version.Unmarshal(m, b)
}
func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Version.Marshal(b, m, deterministic)
}
func (dst *Version) XXX_Merge(src proto.Message) {
xxx_messageInfo_Version.Merge(dst, src)
}
func (m *Version) XXX_Size() int {
return xxx_messageInfo_Version.Size(m)
}
func (m *Version) XXX_DiscardUnknown() {
xxx_messageInfo_Version.DiscardUnknown(m)
}
var xxx_messageInfo_Version proto.InternalMessageInfo
func (m *Version) GetMajor() int32 {
if m != nil && m.Major != nil {
return *m.Major
}
return 0
}
func (m *Version) GetMinor() int32 {
if m != nil && m.Minor != nil {
return *m.Minor
}
return 0
}
func (m *Version) GetPatch() int32 {
if m != nil && m.Patch != nil {
return *m.Patch
}
return 0
}
func (m *Version) GetSuffix() string {
if m != nil && m.Suffix != nil {
return *m.Suffix
}
return ""
}
// An encoded CodeGeneratorRequest is written to the plugin's stdin.
type CodeGeneratorRequest struct {
// The .proto files that were explicitly listed on the command-line. The
// code generator should generate code only for these files. Each file's
// descriptor will be included in proto_file, below.
FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
// The generator parameter passed on the command-line.
Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
// FileDescriptorProtos for all files in files_to_generate and everything
// they import. The files will appear in topological order, so each file
// appears before any file that imports it.
//
// protoc guarantees that all proto_files will be written after
// the fields above, even though this is not technically guaranteed by the
// protobuf wire format. This theoretically could allow a plugin to stream
// in the FileDescriptorProtos and handle them one by one rather than read
// the entire set into memory at once. However, as of this writing, this
// is not similarly optimized on protoc's end -- it will store all fields in
// memory at once before sending them to the plugin.
//
// Type names of fields and extensions in the FileDescriptorProto are always
// fully qualified.
ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
// The version number of protocol compiler.
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} }
func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) }
func (*CodeGeneratorRequest) ProtoMessage() {}
func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *CodeGeneratorRequest) Unmarshal(b []byte) error {
return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b)
}
func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic)
}
func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src)
}
func (m *CodeGeneratorRequest) XXX_Size() int {
return xxx_messageInfo_CodeGeneratorRequest.Size(m)
}
func (m *CodeGeneratorRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo
func (m *CodeGeneratorRequest) GetFileToGenerate() []string {
if m != nil {
return m.FileToGenerate
}
return nil
}
func (m *CodeGeneratorRequest) GetParameter() string {
if m != nil && m.Parameter != nil {
return *m.Parameter
}
return ""
}
func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto {
if m != nil {
return m.ProtoFile
}
return nil
}
func (m *CodeGeneratorRequest) GetCompilerVersion() *Version {
if m != nil {
return m.CompilerVersion
}
return nil
}
// The plugin writes an encoded CodeGeneratorResponse to stdout.
type CodeGeneratorResponse struct {
// Error message. If non-empty, code generation failed. The plugin process
// should exit with status code zero even if it reports an error in this way.
//
// This should be used to indicate errors in .proto files which prevent the
// code generator from generating correct code. Errors which indicate a
// problem in protoc itself -- such as the input CodeGeneratorRequest being
// unparseable -- should be reported by writing a message to stderr and
// exiting with a non-zero status code.
Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} }
func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) }
func (*CodeGeneratorResponse) ProtoMessage() {}
func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *CodeGeneratorResponse) Unmarshal(b []byte) error {
return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b)
}
func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic)
}
func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src)
}
func (m *CodeGeneratorResponse) XXX_Size() int {
return xxx_messageInfo_CodeGeneratorResponse.Size(m)
}
func (m *CodeGeneratorResponse) XXX_DiscardUnknown() {
xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo
func (m *CodeGeneratorResponse) GetError() string {
if m != nil && m.Error != nil {
return *m.Error
}
return ""
}
func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
if m != nil {
return m.File
}
return nil
}
// Represents a single generated file.
type CodeGeneratorResponse_File struct {
// The file name, relative to the output directory. The name must not
// contain "." or ".." components and must be relative, not be absolute (so,
// the file cannot lie outside the output directory). "/" must be used as
// the path separator, not "\".
//
// If the name is omitted, the content will be appended to the previous
// file. This allows the generator to break large files into small chunks,
// and allows the generated text to be streamed back to protoc so that large
// files need not reside completely in memory at one time. Note that as of
// this writing protoc does not optimize for this -- it will read the entire
// CodeGeneratorResponse before writing files to disk.
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// If non-empty, indicates that the named file should already exist, and the
// content here is to be inserted into that file at a defined insertion
// point. This feature allows a code generator to extend the output
// produced by another code generator. The original generator may provide
// insertion points by placing special annotations in the file that look
// like:
// @@protoc_insertion_point(NAME)
// The annotation can have arbitrary text before and after it on the line,
// which allows it to be placed in a comment. NAME should be replaced with
// an identifier naming the point -- this is what other generators will use
// as the insertion_point. Code inserted at this point will be placed
// immediately above the line containing the insertion point (thus multiple
// insertions to the same point will come out in the order they were added).
// The double-@ is intended to make it unlikely that the generated code
// could contain things that look like insertion points by accident.
//
// For example, the C++ code generator places the following line in the
// .pb.h files that it generates:
// // @@protoc_insertion_point(namespace_scope)
// This line appears within the scope of the file's package namespace, but
// outside of any particular class. Another plugin can then specify the
// insertion_point "namespace_scope" to generate additional classes or
// other declarations that should be placed in this scope.
//
// Note that if the line containing the insertion point begins with
// whitespace, the same whitespace will be added to every line of the
// inserted text. This is useful for languages like Python, where
// indentation matters. In these languages, the insertion point comment
// should be indented the same amount as any inserted code will need to be
// in order to work correctly in that context.
//
// The code generator that generates the initial file and the one which
// inserts into it must both run as part of a single invocation of protoc.
// Code generators are executed in the order in which they appear on the
// command line.
//
// If |insertion_point| is present, |name| must also be present.
InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
// The file contents.
Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} }
func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) }
func (*CodeGeneratorResponse_File) ProtoMessage() {}
func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error {
return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b)
}
func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic)
}
func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) {
xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src)
}
func (m *CodeGeneratorResponse_File) XXX_Size() int {
return xxx_messageInfo_CodeGeneratorResponse_File.Size(m)
}
func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() {
xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m)
}
var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo
func (m *CodeGeneratorResponse_File) GetName() string {
if m != nil && m.Name != nil {
return *m.Name
}
return ""
}
func (m *CodeGeneratorResponse_File) GetInsertionPoint() string {
if m != nil && m.InsertionPoint != nil {
return *m.InsertionPoint
}
return ""
}
func (m *CodeGeneratorResponse_File) GetContent() string {
if m != nil && m.Content != nil {
return *m.Content
}
return ""
}
func init() {
proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version")
proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest")
proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse")
proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File")
}
func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 417 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41,
0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2,
0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30,
0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa,
0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91,
0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63,
0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb,
0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55,
0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8,
0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1,
0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f,
0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d,
0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2,
0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a,
0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2,
0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d,
0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda,
0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed,
0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34,
0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79,
0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45,
0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4,
0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e,
0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92,
0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d,
0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00,
0x00,
}

@ -0,0 +1,83 @@
// Code generated by protoc-gen-go.
// source: google/protobuf/compiler/plugin.proto
// DO NOT EDIT!
package google_protobuf_compiler
import proto "github.com/golang/protobuf/proto"
import "math"
import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
// Reference proto and math imports to suppress error if they are not otherwise used.
var _ = proto.GetString
var _ = math.Inf
type CodeGeneratorRequest struct {
FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} }
func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
func (*CodeGeneratorRequest) ProtoMessage() {}
func (this *CodeGeneratorRequest) GetParameter() string {
if this != nil && this.Parameter != nil {
return *this.Parameter
}
return ""
}
type CodeGeneratorResponse struct {
Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} }
func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
func (*CodeGeneratorResponse) ProtoMessage() {}
func (this *CodeGeneratorResponse) GetError() string {
if this != nil && this.Error != nil {
return *this.Error
}
return ""
}
type CodeGeneratorResponse_File struct {
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} }
func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
func (*CodeGeneratorResponse_File) ProtoMessage() {}
func (this *CodeGeneratorResponse_File) GetName() string {
if this != nil && this.Name != nil {
return *this.Name
}
return ""
}
func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
if this != nil && this.InsertionPoint != nil {
return *this.InsertionPoint
}
return ""
}
func (this *CodeGeneratorResponse_File) GetContent() string {
if this != nil && this.Content != nil {
return *this.Content
}
return ""
}
func init() {
}

@ -0,0 +1,167 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: kenton@google.com (Kenton Varda)
//
// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
// change.
//
// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
// just a program that reads a CodeGeneratorRequest from stdin and writes a
// CodeGeneratorResponse to stdout.
//
// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
// of dealing with the raw protocol defined here.
//
// A plugin executable needs only to be placed somewhere in the path. The
// plugin should be named "protoc-gen-$NAME", and will then be used when the
// flag "--${NAME}_out" is passed to protoc.
syntax = "proto2";
package google.protobuf.compiler;
option java_package = "com.google.protobuf.compiler";
option java_outer_classname = "PluginProtos";
option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
import "google/protobuf/descriptor.proto";
// The version number of protocol compiler.
message Version {
optional int32 major = 1;
optional int32 minor = 2;
optional int32 patch = 3;
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
// be empty for mainline stable releases.
optional string suffix = 4;
}
// An encoded CodeGeneratorRequest is written to the plugin's stdin.
message CodeGeneratorRequest {
// The .proto files that were explicitly listed on the command-line. The
// code generator should generate code only for these files. Each file's
// descriptor will be included in proto_file, below.
repeated string file_to_generate = 1;
// The generator parameter passed on the command-line.
optional string parameter = 2;
// FileDescriptorProtos for all files in files_to_generate and everything
// they import. The files will appear in topological order, so each file
// appears before any file that imports it.
//
// protoc guarantees that all proto_files will be written after
// the fields above, even though this is not technically guaranteed by the
// protobuf wire format. This theoretically could allow a plugin to stream
// in the FileDescriptorProtos and handle them one by one rather than read
// the entire set into memory at once. However, as of this writing, this
// is not similarly optimized on protoc's end -- it will store all fields in
// memory at once before sending them to the plugin.
//
// Type names of fields and extensions in the FileDescriptorProto are always
// fully qualified.
repeated FileDescriptorProto proto_file = 15;
// The version number of protocol compiler.
optional Version compiler_version = 3;
}
// The plugin writes an encoded CodeGeneratorResponse to stdout.
message CodeGeneratorResponse {
// Error message. If non-empty, code generation failed. The plugin process
// should exit with status code zero even if it reports an error in this way.
//
// This should be used to indicate errors in .proto files which prevent the
// code generator from generating correct code. Errors which indicate a
// problem in protoc itself -- such as the input CodeGeneratorRequest being
// unparseable -- should be reported by writing a message to stderr and
// exiting with a non-zero status code.
optional string error = 1;
// Represents a single generated file.
message File {
// The file name, relative to the output directory. The name must not
// contain "." or ".." components and must be relative, not be absolute (so,
// the file cannot lie outside the output directory). "/" must be used as
// the path separator, not "\".
//
// If the name is omitted, the content will be appended to the previous
// file. This allows the generator to break large files into small chunks,
// and allows the generated text to be streamed back to protoc so that large
// files need not reside completely in memory at one time. Note that as of
// this writing protoc does not optimize for this -- it will read the entire
// CodeGeneratorResponse before writing files to disk.
optional string name = 1;
// If non-empty, indicates that the named file should already exist, and the
// content here is to be inserted into that file at a defined insertion
// point. This feature allows a code generator to extend the output
// produced by another code generator. The original generator may provide
// insertion points by placing special annotations in the file that look
// like:
// @@protoc_insertion_point(NAME)
// The annotation can have arbitrary text before and after it on the line,
// which allows it to be placed in a comment. NAME should be replaced with
// an identifier naming the point -- this is what other generators will use
// as the insertion_point. Code inserted at this point will be placed
// immediately above the line containing the insertion point (thus multiple
// insertions to the same point will come out in the order they were added).
// The double-@ is intended to make it unlikely that the generated code
// could contain things that look like insertion points by accident.
//
// For example, the C++ code generator places the following line in the
// .pb.h files that it generates:
// // @@protoc_insertion_point(namespace_scope)
// This line appears within the scope of the file's package namespace, but
// outside of any particular class. Another plugin can then specify the
// insertion_point "namespace_scope" to generate additional classes or
// other declarations that should be placed in this scope.
//
// Note that if the line containing the insertion point begins with
// whitespace, the same whitespace will be added to every line of the
// inserted text. This is useful for languages like Python, where
// indentation matters. In these languages, the insertion point comment
// should be indented the same amount as any inserted code will need to be
// in order to work correctly in that context.
//
// The code generator that generates the initial file and the one which
// inserts into it must both run as part of a single invocation of protoc.
// Code generators are executed in the order in which they appear on the
// command line.
//
// If |insertion_point| is present, |name| must also be present.
optional string insertion_point = 2;
// The file contents.
optional string content = 15;
}
repeated File file = 15;
}
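The comments above describe the plugin protocol: protoc hands an encoded CodeGeneratorRequest to the plugin on stdin and expects an encoded CodeGeneratorResponse on stdout. A minimal, hypothetical Go plugin built against the generated plugin_go package might look like this (the "demo" name and output file naming are assumptions, not part of this diff):
```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// protoc writes an encoded CodeGeneratorRequest to the plugin's stdin.
	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(data, req); err != nil {
		log.Fatal(err)
	}

	// Emit one trivial file per requested input.
	resp := &plugin.CodeGeneratorResponse{}
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".demo.txt"),
			Content: proto.String("generated from " + name + "\n"),
		})
	}

	// The encoded CodeGeneratorResponse goes back on stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := os.Stdout.Write(out); err != nil {
		log.Fatal(err)
	}
}
```
Installed as protoc-gen-demo somewhere on $PATH, such a plugin would be invoked via `protoc --demo_out=. file.proto`.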

@ -0,0 +1,450 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/struct.proto
package structpb // import "github.com/golang/protobuf/ptypes/struct"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `NullValue` is a singleton enumeration to represent the null value for the
// `Value` type union.
//
// The JSON representation for `NullValue` is JSON `null`.
type NullValue int32
const (
// Null value.
NullValue_NULL_VALUE NullValue = 0
)
var NullValue_name = map[int32]string{
0: "NULL_VALUE",
}
var NullValue_value = map[string]int32{
"NULL_VALUE": 0,
}
func (x NullValue) String() string {
return proto.EnumName(NullValue_name, int32(x))
}
func (NullValue) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_struct_3a5a94e0c7801b27, []int{0}
}
func (NullValue) XXX_WellKnownType() string { return "NullValue" }
// `Struct` represents a structured data value, consisting of fields
// which map to dynamically typed values. In some languages, `Struct`
// might be supported by a native representation. For example, in
// scripting languages like JS a struct is represented as an
// object. The details of that representation are described together
// with the proto support for the language.
//
// The JSON representation for `Struct` is JSON object.
type Struct struct {
// Unordered map of dynamically typed values.
Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Struct) Reset() { *m = Struct{} }
func (m *Struct) String() string { return proto.CompactTextString(m) }
func (*Struct) ProtoMessage() {}
func (*Struct) Descriptor() ([]byte, []int) {
return fileDescriptor_struct_3a5a94e0c7801b27, []int{0}
}
func (*Struct) XXX_WellKnownType() string { return "Struct" }
func (m *Struct) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Struct.Unmarshal(m, b)
}
func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
}
func (dst *Struct) XXX_Merge(src proto.Message) {
xxx_messageInfo_Struct.Merge(dst, src)
}
func (m *Struct) XXX_Size() int {
return xxx_messageInfo_Struct.Size(m)
}
func (m *Struct) XXX_DiscardUnknown() {
xxx_messageInfo_Struct.DiscardUnknown(m)
}
var xxx_messageInfo_Struct proto.InternalMessageInfo
func (m *Struct) GetFields() map[string]*Value {
if m != nil {
return m.Fields
}
return nil
}
// `Value` represents a dynamically typed value which can be either
// null, a number, a string, a boolean, a recursive struct value, or a
// list of values. A producer of a value is expected to set one of these
// variants; the absence of any variant indicates an error.
//
// The JSON representation for `Value` is JSON value.
type Value struct {
// The kind of value.
//
// Types that are valid to be assigned to Kind:
// *Value_NullValue
// *Value_NumberValue
// *Value_StringValue
// *Value_BoolValue
// *Value_StructValue
// *Value_ListValue
Kind isValue_Kind `protobuf_oneof:"kind"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Value) Reset() { *m = Value{} }
func (m *Value) String() string { return proto.CompactTextString(m) }
func (*Value) ProtoMessage() {}
func (*Value) Descriptor() ([]byte, []int) {
return fileDescriptor_struct_3a5a94e0c7801b27, []int{1}
}
func (*Value) XXX_WellKnownType() string { return "Value" }
func (m *Value) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Value.Unmarshal(m, b)
}
func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Value.Marshal(b, m, deterministic)
}
func (dst *Value) XXX_Merge(src proto.Message) {
xxx_messageInfo_Value.Merge(dst, src)
}
func (m *Value) XXX_Size() int {
return xxx_messageInfo_Value.Size(m)
}
func (m *Value) XXX_DiscardUnknown() {
xxx_messageInfo_Value.DiscardUnknown(m)
}
var xxx_messageInfo_Value proto.InternalMessageInfo
type isValue_Kind interface {
isValue_Kind()
}
type Value_NullValue struct {
NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
}
type Value_NumberValue struct {
NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
}
type Value_StringValue struct {
StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
}
type Value_BoolValue struct {
BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
}
type Value_StructValue struct {
StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
}
type Value_ListValue struct {
ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
}
func (*Value_NullValue) isValue_Kind() {}
func (*Value_NumberValue) isValue_Kind() {}
func (*Value_StringValue) isValue_Kind() {}
func (*Value_BoolValue) isValue_Kind() {}
func (*Value_StructValue) isValue_Kind() {}
func (*Value_ListValue) isValue_Kind() {}
func (m *Value) GetKind() isValue_Kind {
if m != nil {
return m.Kind
}
return nil
}
func (m *Value) GetNullValue() NullValue {
if x, ok := m.GetKind().(*Value_NullValue); ok {
return x.NullValue
}
return NullValue_NULL_VALUE
}
func (m *Value) GetNumberValue() float64 {
if x, ok := m.GetKind().(*Value_NumberValue); ok {
return x.NumberValue
}
return 0
}
func (m *Value) GetStringValue() string {
if x, ok := m.GetKind().(*Value_StringValue); ok {
return x.StringValue
}
return ""
}
func (m *Value) GetBoolValue() bool {
if x, ok := m.GetKind().(*Value_BoolValue); ok {
return x.BoolValue
}
return false
}
func (m *Value) GetStructValue() *Struct {
if x, ok := m.GetKind().(*Value_StructValue); ok {
return x.StructValue
}
return nil
}
func (m *Value) GetListValue() *ListValue {
if x, ok := m.GetKind().(*Value_ListValue); ok {
return x.ListValue
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{
(*Value_NullValue)(nil),
(*Value_NumberValue)(nil),
(*Value_StringValue)(nil),
(*Value_BoolValue)(nil),
(*Value_StructValue)(nil),
(*Value_ListValue)(nil),
}
}
func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Value)
// kind
switch x := m.Kind.(type) {
case *Value_NullValue:
b.EncodeVarint(1<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.NullValue))
case *Value_NumberValue:
b.EncodeVarint(2<<3 | proto.WireFixed64)
b.EncodeFixed64(math.Float64bits(x.NumberValue))
case *Value_StringValue:
b.EncodeVarint(3<<3 | proto.WireBytes)
b.EncodeStringBytes(x.StringValue)
case *Value_BoolValue:
t := uint64(0)
if x.BoolValue {
t = 1
}
b.EncodeVarint(4<<3 | proto.WireVarint)
b.EncodeVarint(t)
case *Value_StructValue:
b.EncodeVarint(5<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.StructValue); err != nil {
return err
}
case *Value_ListValue:
b.EncodeVarint(6<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ListValue); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Value.Kind has unexpected type %T", x)
}
return nil
}
func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Value)
switch tag {
case 1: // kind.null_value
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Kind = &Value_NullValue{NullValue(x)}
return true, err
case 2: // kind.number_value
if wire != proto.WireFixed64 {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeFixed64()
m.Kind = &Value_NumberValue{math.Float64frombits(x)}
return true, err
case 3: // kind.string_value
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Kind = &Value_StringValue{x}
return true, err
case 4: // kind.bool_value
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Kind = &Value_BoolValue{x != 0}
return true, err
case 5: // kind.struct_value
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Struct)
err := b.DecodeMessage(msg)
m.Kind = &Value_StructValue{msg}
return true, err
case 6: // kind.list_value
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ListValue)
err := b.DecodeMessage(msg)
m.Kind = &Value_ListValue{msg}
return true, err
default:
return false, nil
}
}
func _Value_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Value)
// kind
switch x := m.Kind.(type) {
case *Value_NullValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(x.NullValue))
case *Value_NumberValue:
n += 1 // tag and wire
n += 8
case *Value_StringValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(len(x.StringValue)))
n += len(x.StringValue)
case *Value_BoolValue:
n += 1 // tag and wire
n += 1
case *Value_StructValue:
s := proto.Size(x.StructValue)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *Value_ListValue:
s := proto.Size(x.ListValue)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
// `ListValue` is a wrapper around a repeated field of values.
//
// The JSON representation for `ListValue` is JSON array.
type ListValue struct {
// Repeated field of dynamically typed values.
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListValue) Reset() { *m = ListValue{} }
func (m *ListValue) String() string { return proto.CompactTextString(m) }
func (*ListValue) ProtoMessage() {}
func (*ListValue) Descriptor() ([]byte, []int) {
return fileDescriptor_struct_3a5a94e0c7801b27, []int{2}
}
func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
func (m *ListValue) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListValue.Unmarshal(m, b)
}
func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
}
func (dst *ListValue) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListValue.Merge(dst, src)
}
func (m *ListValue) XXX_Size() int {
return xxx_messageInfo_ListValue.Size(m)
}
func (m *ListValue) XXX_DiscardUnknown() {
xxx_messageInfo_ListValue.DiscardUnknown(m)
}
var xxx_messageInfo_ListValue proto.InternalMessageInfo
func (m *ListValue) GetValues() []*Value {
if m != nil {
return m.Values
}
return nil
}
func init() {
proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
proto.RegisterType((*Value)(nil), "google.protobuf.Value")
proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
}
func init() {
proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27)
}
var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{
// 417 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
0x00,
}
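For context, a minimal sketch of how the generated types above are used directly from Go (the import path is the one declared at the top of the generated file; the field values are illustrative):

package main

import (
	"fmt"

	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// Build the equivalent of the JSON object {"job": "loki", "replicas": 3}
	// from the generated Struct and Value types shown above.
	s := &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"job":      {Kind: &structpb.Value_StringValue{StringValue: "loki"}},
			"replicas": {Kind: &structpb.Value_NumberValue{NumberValue: 3}},
		},
	}
	fmt.Println(s.GetFields()["job"].GetStringValue()) // "loki"
}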

@@ -0,0 +1,96 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
option java_package = "com.google.protobuf";
option java_outer_classname = "StructProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// `Struct` represents a structured data value, consisting of fields
// which map to dynamically typed values. In some languages, `Struct`
// might be supported by a native representation. For example, in
// scripting languages like JS a struct is represented as an
// object. The details of that representation are described together
// with the proto support for the language.
//
// The JSON representation for `Struct` is JSON object.
message Struct {
// Unordered map of dynamically typed values.
map<string, Value> fields = 1;
}
// `Value` represents a dynamically typed value which can be either
// null, a number, a string, a boolean, a recursive struct value, or a
// list of values. A producer of a value is expected to set one of these
// variants; the absence of any variant indicates an error.
//
// The JSON representation for `Value` is JSON value.
message Value {
// The kind of value.
oneof kind {
// Represents a null value.
NullValue null_value = 1;
// Represents a double value.
double number_value = 2;
// Represents a string value.
string string_value = 3;
// Represents a boolean value.
bool bool_value = 4;
// Represents a structured value.
Struct struct_value = 5;
// Represents a repeated `Value`.
ListValue list_value = 6;
}
}
// `NullValue` is a singleton enumeration to represent the null value for the
// `Value` type union.
//
// The JSON representation for `NullValue` is JSON `null`.
enum NullValue {
// Null value.
NULL_VALUE = 0;
}
// `ListValue` is a wrapper around a repeated field of values.
//
// The JSON representation for `ListValue` is JSON array.
message ListValue {
// Repeated field of dynamically typed values.
repeated Value values = 1;
}
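The JSON mapping mentioned in the comments above can be exercised with the jsonpb marshaler from golang/protobuf; a small sketch, assuming that package is available alongside the generated bindings:

package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// A Value wrapping a nested Struct; per the comments above this should
	// marshal to the JSON object {"enabled":true}.
	v := &structpb.Value{
		Kind: &structpb.Value_StructValue{
			StructValue: &structpb.Struct{
				Fields: map[string]*structpb.Value{
					"enabled": {Kind: &structpb.Value_BoolValue{BoolValue: true}},
				},
			},
		},
	}
	out, err := (&jsonpb.Marshaler{}).MarshalToString(v)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}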

@@ -0,0 +1,9 @@
language: go
go:
- 1.4.3
- 1.5.3
- tip
script:
- go test -v ./...

@@ -0,0 +1,10 @@
# How to contribute
We definitely welcome patches and contributions to this project!
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
You may have already signed it for other Google projects.

@@ -0,0 +1,9 @@
Paul Borman <borman@google.com>
bmatsuo
shawnps
theory
jboverfelt
dsymonds
cd1
wallclockbuilder
dansouza

@@ -0,0 +1,27 @@
Copyright (c) 2009,2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,19 @@
# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
The uuid package generates and inspects UUIDs based on
[RFC 4122](http://tools.ietf.org/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
code.google.com/p/go-uuid). It differs from these earlier packages in that
a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
`go get github.com/google/uuid`
###### Documentation
[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
http://godoc.org/github.com/google/uuid
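A minimal usage sketch, assuming the package's exported New and Parse helpers (they are not part of the files shown in this diff):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Generate a random (Version 4) UUID and round-trip it through its
	// canonical string form.
	id := uuid.New()
	fmt.Println(id.String())

	parsed, err := uuid.Parse(id.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == id) // true; UUIDs are comparable 16 byte arrays
}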

@@ -0,0 +1,80 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"fmt"
"os"
)
// A Domain represents a Version 2 domain
type Domain byte
// Domain constants for DCE Security (Version 2) UUIDs.
const (
Person = Domain(0)
Group = Domain(1)
Org = Domain(2)
)
// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the user's UID for the Person
// domain and the user's GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
uuid, err := NewUUID()
if err == nil {
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
uuid[9] = byte(domain)
binary.BigEndian.PutUint32(uuid[0:], id)
}
return uuid, err
}
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
// NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
// NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
return NewDCESecurity(Group, uint32(os.Getgid()))
}
// Domain returns the domain for a Version 2 UUID. Domains are only defined
// for Version 2 UUIDs.
func (uuid UUID) Domain() Domain {
return Domain(uuid[9])
}
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
// UUIDs.
func (uuid UUID) ID() uint32 {
return binary.BigEndian.Uint32(uuid[0:4])
}
func (d Domain) String() string {
switch d {
case Person:
return "Person"
case Group:
return "Group"
case Org:
return "Org"
}
return fmt.Sprintf("Domain%d", int(d))
}
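A short usage sketch of the Version 2 helpers defined above (the printed values depend on the local UID):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// A DCE Security UUID in the Person domain, tagged with this process's UID.
	id, err := uuid.NewDCEPerson()
	if err != nil {
		panic(err)
	}
	fmt.Println(id.Domain(), id.ID()) // e.g. "Person 1000"
}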

@@ -0,0 +1,12 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uuid generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
// Services.
//
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
// maps or compared directly.
package uuid

@@ -0,0 +1 @@
module github.com/google/uuid

@@ -0,0 +1,53 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"crypto/md5"
"crypto/sha1"
"hash"
)
// Well known namespace IDs and UUIDs
var (
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
)
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 bytes in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space[:])
h.Write(data)
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
return uuid
}
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}
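A short sketch showing that the name-based constructors defined above are deterministic:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Hashing the same name in the same namespace always yields the same UUID.
	a := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	b := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	fmt.Println(a == b) // true
}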

@@ -0,0 +1,37 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "fmt"
// MarshalText implements encoding.TextMarshaler.
func (uuid UUID) MarshalText() ([]byte, error) {
var js [36]byte
encodeHex(js[:], uuid)
return js[:], nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err == nil {
*uuid = id
}
return err
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (uuid UUID) MarshalBinary() ([]byte, error) {
return uuid[:], nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (uuid *UUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(uuid[:], data)
return nil
}
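Because UUID satisfies the text and binary marshaler interfaces above, it drops straight into encoding/json; a small sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

type record struct {
	ID uuid.UUID `json:"id"`
}

func main() {
	// encoding/json uses MarshalText/UnmarshalText, so the UUID is rendered
	// as its canonical string form and parsed back losslessly.
	in := record{ID: uuid.New()}
	data, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}

	var out record
	if err := json.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.ID == in.ID) // true
}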
