mirror of https://github.com/grafana/loki
vendoring: update cortex to latest master (#938)
* update cortex to latest master using v3.4.0-rc.1 for go.etcd.io/etcd v0.0.2 for github.com/prometheus/procfs v1.0.0 for github.com/prometheus/client_golang latest master for github.com/weaveworks/common with changes from https://github.com/weaveworks/common/pull/153 * fixed failing tests * use large instance for test and lint jobs in CircleCI * running only 6 test binaries in parallel * removed resource type change for CircleCI * changed GOGC to 10 for lint makefile targetpull/946/head
parent
9484632d3d
commit
b687ec6e5b
@ -0,0 +1,202 @@ |
||||
|
||||
Apache License |
||||
Version 2.0, January 2004 |
||||
http://www.apache.org/licenses/ |
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||
|
||||
1. Definitions. |
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, |
||||
and distribution as defined by Sections 1 through 9 of this document. |
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by |
||||
the copyright owner that is granting the License. |
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all |
||||
other entities that control, are controlled by, or are under common |
||||
control with that entity. For the purposes of this definition, |
||||
"control" means (i) the power, direct or indirect, to cause the |
||||
direction or management of such entity, whether by contract or |
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||
outstanding shares, or (iii) beneficial ownership of such entity. |
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity |
||||
exercising permissions granted by this License. |
||||
|
||||
"Source" form shall mean the preferred form for making modifications, |
||||
including but not limited to software source code, documentation |
||||
source, and configuration files. |
||||
|
||||
"Object" form shall mean any form resulting from mechanical |
||||
transformation or translation of a Source form, including but |
||||
not limited to compiled object code, generated documentation, |
||||
and conversions to other media types. |
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or |
||||
Object form, made available under the License, as indicated by a |
||||
copyright notice that is included in or attached to the work |
||||
(an example is provided in the Appendix below). |
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object |
||||
form, that is based on (or derived from) the Work and for which the |
||||
editorial revisions, annotations, elaborations, or other modifications |
||||
represent, as a whole, an original work of authorship. For the purposes |
||||
of this License, Derivative Works shall not include works that remain |
||||
separable from, or merely link (or bind by name) to the interfaces of, |
||||
the Work and Derivative Works thereof. |
||||
|
||||
"Contribution" shall mean any work of authorship, including |
||||
the original version of the Work and any modifications or additions |
||||
to that Work or Derivative Works thereof, that is intentionally |
||||
submitted to Licensor for inclusion in the Work by the copyright owner |
||||
or by an individual or Legal Entity authorized to submit on behalf of |
||||
the copyright owner. For the purposes of this definition, "submitted" |
||||
means any form of electronic, verbal, or written communication sent |
||||
to the Licensor or its representatives, including but not limited to |
||||
communication on electronic mailing lists, source code control systems, |
||||
and issue tracking systems that are managed by, or on behalf of, the |
||||
Licensor for the purpose of discussing and improving the Work, but |
||||
excluding communication that is conspicuously marked or otherwise |
||||
designated in writing by the copyright owner as "Not a Contribution." |
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||
on behalf of whom a Contribution has been received by Licensor and |
||||
subsequently incorporated within the Work. |
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
copyright license to reproduce, prepare Derivative Works of, |
||||
publicly display, publicly perform, sublicense, and distribute the |
||||
Work and such Derivative Works in Source or Object form. |
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
(except as stated in this section) patent license to make, have made, |
||||
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||
where such license applies only to those patent claims licensable |
||||
by such Contributor that are necessarily infringed by their |
||||
Contribution(s) alone or by combination of their Contribution(s) |
||||
with the Work to which such Contribution(s) was submitted. If You |
||||
institute patent litigation against any entity (including a |
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||
or a Contribution incorporated within the Work constitutes direct |
||||
or contributory patent infringement, then any patent licenses |
||||
granted to You under this License for that Work shall terminate |
||||
as of the date such litigation is filed. |
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the |
||||
Work or Derivative Works thereof in any medium, with or without |
||||
modifications, and in Source or Object form, provided that You |
||||
meet the following conditions: |
||||
|
||||
(a) You must give any other recipients of the Work or |
||||
Derivative Works a copy of this License; and |
||||
|
||||
(b) You must cause any modified files to carry prominent notices |
||||
stating that You changed the files; and |
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works |
||||
that You distribute, all copyright, patent, trademark, and |
||||
attribution notices from the Source form of the Work, |
||||
excluding those notices that do not pertain to any part of |
||||
the Derivative Works; and |
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its |
||||
distribution, then any Derivative Works that You distribute must |
||||
include a readable copy of the attribution notices contained |
||||
within such NOTICE file, excluding those notices that do not |
||||
pertain to any part of the Derivative Works, in at least one |
||||
of the following places: within a NOTICE text file distributed |
||||
as part of the Derivative Works; within the Source form or |
||||
documentation, if provided along with the Derivative Works; or, |
||||
within a display generated by the Derivative Works, if and |
||||
wherever such third-party notices normally appear. The contents |
||||
of the NOTICE file are for informational purposes only and |
||||
do not modify the License. You may add Your own attribution |
||||
notices within Derivative Works that You distribute, alongside |
||||
or as an addendum to the NOTICE text from the Work, provided |
||||
that such additional attribution notices cannot be construed |
||||
as modifying the License. |
||||
|
||||
You may add Your own copyright statement to Your modifications and |
||||
may provide additional or different license terms and conditions |
||||
for use, reproduction, or distribution of Your modifications, or |
||||
for any such Derivative Works as a whole, provided Your use, |
||||
reproduction, and distribution of the Work otherwise complies with |
||||
the conditions stated in this License. |
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||
any Contribution intentionally submitted for inclusion in the Work |
||||
by You to the Licensor shall be under the terms and conditions of |
||||
this License, without any additional terms or conditions. |
||||
Notwithstanding the above, nothing herein shall supersede or modify |
||||
the terms of any separate license agreement you may have executed |
||||
with Licensor regarding such Contributions. |
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade |
||||
names, trademarks, service marks, or product names of the Licensor, |
||||
except as required for reasonable and customary use in describing the |
||||
origin of the Work and reproducing the content of the NOTICE file. |
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or |
||||
agreed to in writing, Licensor provides the Work (and each |
||||
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||
implied, including, without limitation, any warranties or conditions |
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||
appropriateness of using or redistributing the Work and assume any |
||||
risks associated with Your exercise of permissions under this License. |
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory, |
||||
whether in tort (including negligence), contract, or otherwise, |
||||
unless required by applicable law (such as deliberate and grossly |
||||
negligent acts) or agreed to in writing, shall any Contributor be |
||||
liable to You for damages, including any direct, indirect, special, |
||||
incidental, or consequential damages of any character arising as a |
||||
result of this License or out of the use or inability to use the |
||||
Work (including but not limited to damages for loss of goodwill, |
||||
work stoppage, computer failure or malfunction, or any and all |
||||
other commercial damages or losses), even if such Contributor |
||||
has been advised of the possibility of such damages. |
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing |
||||
the Work or Derivative Works thereof, You may choose to offer, |
||||
and charge a fee for, acceptance of support, warranty, indemnity, |
||||
or other liability obligations and/or rights consistent with this |
||||
License. However, in accepting such obligations, You may act only |
||||
on Your own behalf and on Your sole responsibility, not on behalf |
||||
of any other Contributor, and only if You agree to indemnify, |
||||
defend, and hold each Contributor harmless for any liability |
||||
incurred by, or claims asserted against, such Contributor by reason |
||||
of your accepting any such warranty or additional liability. |
||||
|
||||
END OF TERMS AND CONDITIONS |
||||
|
||||
APPENDIX: How to apply the Apache License to your work. |
||||
|
||||
To apply the Apache License to your work, attach the following |
||||
boilerplate notice, with the fields enclosed by brackets "[]" |
||||
replaced with your own identifying information. (Don't include |
||||
the brackets!) The text should be enclosed in the appropriate |
||||
comment syntax for the file format. We also recommend that a |
||||
file or class name and description of purpose be included on the |
||||
same "printed page" as the copyright notice for easier |
||||
identification within third-party archives. |
||||
|
||||
Copyright [yyyy] [name of copyright owner] |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
@ -0,0 +1,5 @@ |
||||
CoreOS Project |
||||
Copyright 2018 CoreOS, Inc |
||||
|
||||
This product includes software developed at CoreOS, Inc. |
||||
(http://www.coreos.com/). |
||||
@ -0,0 +1,296 @@ |
||||
// Copyright 2013-2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Semantic Versions http://semver.org
|
||||
package semver |
||||
|
||||
import ( |
||||
"bytes" |
||||
"errors" |
||||
"fmt" |
||||
"regexp" |
||||
"strconv" |
||||
"strings" |
||||
) |
||||
|
||||
// Version represents a single semantic version as defined by
// http://semver.org: MAJOR.MINOR.PATCH plus optional pre-release and
// build-metadata components.
type Version struct {
	Major      int64
	Minor      int64
	Patch      int64
	PreRelease PreRelease
	Metadata   string
}

// PreRelease is the dot-separated pre-release component of a version
// (the part after "-", e.g. "alpha.1" in "1.2.3-alpha.1").
type PreRelease string

// splitOff removes the first occurrence of delim and everything after it
// from *input, returning the removed suffix ("" when delim is absent).
func splitOff(input *string, delim string) (val string) {
	parts := strings.SplitN(*input, delim, 2)

	if len(parts) == 2 {
		*input = parts[0]
		val = parts[1]
	}

	return val
}

// New parses version and panics if it is not a valid semantic version.
func New(version string) *Version {
	return Must(NewVersion(version))
}

// NewVersion parses a version string of the form
// "MAJOR.MINOR.PATCH[-PRERELEASE][+METADATA]".
func NewVersion(version string) (*Version, error) {
	v := Version{}

	if err := v.Set(version); err != nil {
		return nil, err
	}

	return &v, nil
}

// Must is a helper for wrapping NewVersion and will panic if err is not nil.
func Must(v *Version, err error) *Version {
	if err != nil {
		panic(err)
	}
	return v
}

// Set parses and updates v from the given version string. Implements flag.Value.
func (v *Version) Set(version string) error {
	// Strip off build metadata ("+...") first, then the pre-release ("-..."),
	// leaving only the dotted-tri numeric core.
	metadata := splitOff(&version, "+")
	preRelease := PreRelease(splitOff(&version, "-"))
	dotParts := strings.SplitN(version, ".", 3)

	if len(dotParts) != 3 {
		return fmt.Errorf("%s is not in dotted-tri format", version)
	}

	if err := validateIdentifier(string(preRelease)); err != nil {
		return fmt.Errorf("failed to validate pre-release: %v", err)
	}

	if err := validateIdentifier(metadata); err != nil {
		return fmt.Errorf("failed to validate metadata: %v", err)
	}

	parsed := make([]int64, 3)

	for i, part := range dotParts {
		// Check the parse error before storing the value; the original
		// stored first and checked second, which worked only by accident.
		val, err := strconv.ParseInt(part, 10, 64)
		if err != nil {
			return err
		}
		parsed[i] = val
	}

	// Only mutate v after the whole string has validated, so a failed Set
	// leaves v untouched.
	v.Metadata = metadata
	v.PreRelease = preRelease
	v.Major = parsed[0]
	v.Minor = parsed[1]
	v.Patch = parsed[2]
	return nil
}

// String renders the canonical "M.m.p[-pre][+meta]" form of v.
func (v Version) String() string {
	var buffer bytes.Buffer

	fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)

	if v.PreRelease != "" {
		fmt.Fprintf(&buffer, "-%s", v.PreRelease)
	}

	if v.Metadata != "" {
		fmt.Fprintf(&buffer, "+%s", v.Metadata)
	}

	return buffer.String()
}

// UnmarshalYAML parses a YAML scalar into v. Implements yaml.Unmarshaler.
func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var data string
	if err := unmarshal(&data); err != nil {
		return err
	}
	return v.Set(data)
}

// MarshalJSON encodes v as a JSON string in canonical form.
func (v Version) MarshalJSON() ([]byte, error) {
	return []byte(`"` + v.String() + `"`), nil
}

// UnmarshalJSON decodes a JSON string into v. Empty input and `""` are
// accepted as no-ops (v is left unchanged), matching the original behavior.
func (v *Version) UnmarshalJSON(data []byte) error {
	l := len(data)
	if l == 0 || string(data) == `""` {
		return nil
	}
	if l < 2 || data[0] != '"' || data[l-1] != '"' {
		return errors.New("invalid semver string")
	}
	return v.Set(string(data[1 : l-1]))
}

// Compare tests if v is less than, equal to, or greater than versionB,
// returning -1, 0, or +1 respectively.
func (v Version) Compare(versionB Version) int {
	if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
		return cmp
	}
	return preReleaseCompare(v, versionB)
}

// Equal tests if v is equal to versionB.
func (v Version) Equal(versionB Version) bool {
	return v.Compare(versionB) == 0
}

// LessThan tests if v is less than versionB.
func (v Version) LessThan(versionB Version) bool {
	return v.Compare(versionB) < 0
}

// Slice converts the comparable parts of the semver into a slice of integers.
func (v Version) Slice() []int64 {
	return []int64{v.Major, v.Minor, v.Patch}
}

// Slice splits the pre-release component into its dot-separated identifiers.
func (p PreRelease) Slice() []string {
	preRelease := string(p)
	return strings.Split(preRelease, ".")
}

// preReleaseCompare orders two otherwise-equal versions by their
// pre-release components per semver §11.
func preReleaseCompare(versionA Version, versionB Version) int {
	a := versionA.PreRelease
	b := versionB.PreRelease

	/* Handle the case where if two versions are otherwise equal it is the
	 * one without a PreRelease that is greater */
	if len(a) == 0 && (len(b) > 0) {
		return 1
	} else if len(b) == 0 && (len(a) > 0) {
		return -1
	}

	// If there is a prerelease, check and compare each part.
	return recursivePreReleaseCompare(a.Slice(), b.Slice())
}

// recursiveCompare orders two equal-length int slices element by element.
func recursiveCompare(versionA []int64, versionB []int64) int {
	if len(versionA) == 0 {
		return 0
	}

	a := versionA[0]
	b := versionB[0]

	if a > b {
		return 1
	} else if a < b {
		return -1
	}

	return recursiveCompare(versionA[1:], versionB[1:])
}

// recursivePreReleaseCompare orders pre-release identifier lists per semver:
// numeric identifiers compare numerically and rank below alphanumeric ones;
// a longer list wins when all shared identifiers are equal.
func recursivePreReleaseCompare(versionA []string, versionB []string) int {
	// A larger set of pre-release fields has a higher precedence than a smaller set,
	// if all of the preceding identifiers are equal.
	if len(versionA) == 0 {
		if len(versionB) > 0 {
			return -1
		}
		return 0
	} else if len(versionB) == 0 {
		// We're longer than versionB so return 1.
		return 1
	}

	a := versionA[0]
	b := versionB[0]

	aInt := false
	bInt := false

	aI, err := strconv.Atoi(versionA[0])
	if err == nil {
		aInt = true
	}

	bI, err := strconv.Atoi(versionB[0])
	if err == nil {
		bInt = true
	}

	// Numeric identifiers always have lower precedence than non-numeric identifiers.
	if aInt && !bInt {
		return -1
	} else if !aInt && bInt {
		return 1
	}

	// Handle Integer Comparison
	if aInt && bInt {
		if aI > bI {
			return 1
		} else if aI < bI {
			return -1
		}
	}

	// Handle String Comparison
	if a > b {
		return 1
	} else if a < b {
		return -1
	}

	return recursivePreReleaseCompare(versionA[1:], versionB[1:])
}

// BumpMajor increments the Major field by 1 and resets all other fields to their default values
func (v *Version) BumpMajor() {
	v.Major++
	v.Minor = 0
	v.Patch = 0
	v.PreRelease = PreRelease("")
	v.Metadata = ""
}

// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
func (v *Version) BumpMinor() {
	v.Minor++
	v.Patch = 0
	v.PreRelease = PreRelease("")
	v.Metadata = ""
}

// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
func (v *Version) BumpPatch() {
	v.Patch++
	v.PreRelease = PreRelease("")
	v.Metadata = ""
}

// validateIdentifier makes sure the provided identifier satisfies semver spec
func validateIdentifier(id string) error {
	if id != "" && !reIdentifier.MatchString(id) {
		return fmt.Errorf("%s is not a valid semver identifier", id)
	}
	return nil
}

// reIdentifier is a regular expression used to check that pre-release and metadata
// identifiers satisfy the spec requirements
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
||||
@ -0,0 +1,38 @@ |
||||
// Copyright 2013-2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package semver |
||||
|
||||
import ( |
||||
"sort" |
||||
) |
||||
|
||||
type Versions []*Version |
||||
|
||||
func (s Versions) Len() int { |
||||
return len(s) |
||||
} |
||||
|
||||
func (s Versions) Swap(i, j int) { |
||||
s[i], s[j] = s[j], s[i] |
||||
} |
||||
|
||||
func (s Versions) Less(i, j int) bool { |
||||
return s[i].LessThan(*s[j]) |
||||
} |
||||
|
||||
// Sort sorts the given slice of Version
|
||||
func Sort(versions []*Version) { |
||||
sort.Sort(Versions(versions)) |
||||
} |
||||
@ -0,0 +1,225 @@ |
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package journal provides write bindings to the local systemd journal.
|
||||
// It is implemented in pure Go and connects to the journal directly over its
|
||||
// unix socket.
|
||||
//
|
||||
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||
// sd-journal C API.
|
||||
//
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||
package journal |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/binary" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"net" |
||||
"os" |
||||
"strconv" |
||||
"strings" |
||||
"sync" |
||||
"sync/atomic" |
||||
"syscall" |
||||
"unsafe" |
||||
) |
||||
|
||||
// Priority of a journal message
type Priority int

// Journal message priorities, mirroring the classic syslog severity
// levels: PriEmerg is the most severe, PriDebug the least. Send writes
// the numeric value as the PRIORITY field of each entry.
const (
	PriEmerg Priority = iota
	PriAlert
	PriCrit
	PriErr
	PriWarning
	PriNotice
	PriInfo
	PriDebug
)
||||
|
||||
var (
	// journalSocket is the path of journald's well-known datagram socket,
	// used both for the reachability probe in Enabled and as the send
	// target in Send.
	// This can be overridden at build-time:
	// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
	journalSocket = "/run/systemd/journal/socket"

	// unixConnPtr atomically holds the local unconnected Unix-domain socket.
	// Concrete safe pointer type: *net.UnixConn
	// A nil pointer means initConn failed and the journal is unavailable.
	unixConnPtr unsafe.Pointer
	// onceConn ensures that unixConnPtr is initialized exactly once.
	onceConn sync.Once
)
||||
|
||||
// init opens the journald socket at package load time. initConn is wrapped
// in onceConn, so the later onceConn.Do call in Enabled is a no-op rather
// than a second initialization.
func init() {
	onceConn.Do(initConn)
}
||||
|
||||
// Enabled checks whether the local systemd journal is available for logging.
|
||||
func Enabled() bool { |
||||
onceConn.Do(initConn) |
||||
|
||||
if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { |
||||
return false |
||||
} |
||||
|
||||
if _, err := net.Dial("unixgram", journalSocket); err != nil { |
||||
return false |
||||
} |
||||
|
||||
return true |
||||
} |
||||
|
||||
// Send a message to the local systemd journal. vars is a map of journald
|
||||
// fields to values. Fields must be composed of uppercase letters, numbers,
|
||||
// and underscores, but must not start with an underscore. Within these
|
||||
// restrictions, any arbitrary field name may be used. Some names have special
|
||||
// significance: see the journalctl documentation
|
||||
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
|
||||
// for more details. vars may be nil.
|
||||
func Send(message string, priority Priority, vars map[string]string) error { |
||||
conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) |
||||
if conn == nil { |
||||
return errors.New("could not initialize socket to journald") |
||||
} |
||||
|
||||
socketAddr := &net.UnixAddr{ |
||||
Name: journalSocket, |
||||
Net: "unixgram", |
||||
} |
||||
|
||||
data := new(bytes.Buffer) |
||||
appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) |
||||
appendVariable(data, "MESSAGE", message) |
||||
for k, v := range vars { |
||||
appendVariable(data, k, v) |
||||
} |
||||
|
||||
_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) |
||||
if err == nil { |
||||
return nil |
||||
} |
||||
if !isSocketSpaceError(err) { |
||||
return err |
||||
} |
||||
|
||||
// Large log entry, send it via tempfile and ancillary-fd.
|
||||
file, err := tempFd() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
defer file.Close() |
||||
_, err = io.Copy(file, data) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
rights := syscall.UnixRights(int(file.Fd())) |
||||
_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// Print prints a message to the local systemd journal using Send().
// The format string and args are expanded with fmt.Sprintf, and the entry
// carries no extra journal fields (vars is nil).
func Print(priority Priority, format string, a ...interface{}) error {
	return Send(fmt.Sprintf(format, a...), priority, nil)
}
||||
|
||||
// appendVariable serializes one NAME=VALUE journal field into w using
// journald's native protocol. Values containing a newline use the binary
// framing (name, newline, 64-bit little-endian length, value, newline);
// all other values are written as a single "NAME=VALUE" line.
//
// Fields whose name fails validVarName are now skipped entirely, with a
// warning on stderr. (Previously the warning said "ignoring" but the
// invalid field was still written, contradicting the message.)
func appendVariable(w io.Writer, name, value string) {
	if err := validVarName(name); err != nil {
		fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
		return
	}
	if strings.ContainsRune(value, '\n') {
		/* When the value contains a newline, we write:
		 * - the variable name, followed by a newline
		 * - the size (in 64bit little endian format)
		 * - the data, followed by a newline
		 */
		fmt.Fprintln(w, name)
		binary.Write(w, binary.LittleEndian, uint64(len(value)))
		fmt.Fprintln(w, value)
	} else {
		/* just write the variable and value all on one line */
		fmt.Fprintf(w, "%s=%s\n", name, value)
	}
}

// validVarName validates a variable name to make sure journald will accept it.
// The variable name must be in uppercase and consist only of characters,
// numbers and underscores, and may not begin with an underscore:
// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
func validVarName(name string) error {
	if name == "" {
		return errors.New("Empty variable name")
	} else if name[0] == '_' {
		return errors.New("Variable name begins with an underscore")
	}

	for _, c := range name {
		if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
			return errors.New("Variable name contains invalid characters")
		}
	}
	return nil
}
||||
|
||||
// isSocketSpaceError checks whether the error is signaling an "overlarge
// message" condition: an EMSGSIZE or ENOBUFS syscall error wrapped inside
// a *net.OpError.
func isSocketSpaceError(err error) bool {
	opErr, ok := err.(*net.OpError)
	if !ok || opErr == nil {
		return false
	}
	sysErr, ok := opErr.Err.(*os.SyscallError)
	if !ok || sysErr == nil {
		return false
	}
	switch sysErr.Err {
	case syscall.EMSGSIZE, syscall.ENOBUFS:
		return true
	default:
		return false
	}
}
||||
|
||||
// tempFd creates a temporary, unlinked file under `/dev/shm`. The open
// descriptor survives the unlink, so it can be handed to journald without
// the file ever being reachable by name.
func tempFd() (*os.File, error) {
	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
	if err != nil {
		return nil, err
	}
	if err := syscall.Unlink(file.Name()); err != nil {
		// Close the file before bailing out; the original returned here
		// with the descriptor still open, leaking an fd on every failure.
		file.Close()
		return nil, err
	}
	return file, nil
}
||||
|
||||
// initConn initializes the global `unixConnPtr` socket.
|
||||
// It is meant to be called exactly once, at program startup.
|
||||
func initConn() { |
||||
autobind, err := net.ResolveUnixAddr("unixgram", "") |
||||
if err != nil { |
||||
return |
||||
} |
||||
|
||||
sock, err := net.ListenUnixgram("unixgram", autobind) |
||||
if err != nil { |
||||
return |
||||
} |
||||
|
||||
atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) |
||||
} |
||||
@ -0,0 +1,39 @@ |
||||
# capnslog, the CoreOS logging package |
||||
|
||||
There are far too many logging packages out there — some with restrictive licenses, some with far too many features (colorization, all sorts of log frameworks), and some that are just a pain to use (lack of `Fatalln()`?). |
||||
capnslog provides a simple but consistent logging interface suitable for all kinds of projects. |
||||
|
||||
### Design Principles |
||||
|
||||
##### `package main` is the place where logging gets turned on and routed |
||||
|
||||
A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak. |
||||
|
||||
##### All log options are runtime-configurable. |
||||
|
||||
Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly. |
||||
|
||||
##### There is one log object per package. It is registered under its repository and package name. |
||||
|
||||
`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs. |
||||
|
||||
##### There is *one* output stream, and it is an `io.Writer` composed with a formatter. |
||||
|
||||
Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer. |
||||
|
||||
Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application. |
||||
|
||||
##### Log objects are an interface |
||||
|
||||
An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed. |
||||
|
||||
##### Log levels have specific meanings: |
||||
|
||||
* Critical: Unrecoverable. Must fail. |
||||
* Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost |
||||
* Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning. |
||||
* Notice: Normal, but important (uncommon) log information. |
||||
* Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations. |
||||
* Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices. |
||||
* Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query. |
||||
|
||||
@ -0,0 +1,157 @@ |
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog |
||||
|
||||
import ( |
||||
"bufio" |
||||
"fmt" |
||||
"io" |
||||
"log" |
||||
"runtime" |
||||
"strings" |
||||
"time" |
||||
) |
||||
|
||||
// Formatter is implemented by every log output backend in this package.
// Format renders one log record for the given package at the given level;
// depth is the number of stack frames between Format and the user's call
// site, for formatters that report file:line. Flush forces any buffered
// output to be written.
type Formatter interface {
	Format(pkg string, level LogLevel, depth int, entries ...interface{})
	Flush()
}
||||
|
||||
// NewStringFormatter returns a Formatter that writes plain-text records
// ("<RFC3339 time> <pkg>: <message>\n") to w through an internal bufio.Writer.
func NewStringFormatter(w io.Writer) Formatter {
	return &StringFormatter{
		w: bufio.NewWriter(w),
	}
}

// StringFormatter renders log records as plain text lines.
type StringFormatter struct {
	w *bufio.Writer // buffered sink; flushed after every Format call
}
||||
|
||||
// Format writes "<RFC3339 UTC time> <pkg>: <entries>\n" and flushes, so each
// record reaches the underlying writer immediately. The level and depth
// arguments are accepted to satisfy Formatter but are not rendered here.
func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
	now := time.Now().UTC()
	s.w.WriteString(now.Format(time.RFC3339))
	s.w.WriteByte(' ')
	writeEntries(s.w, pkg, l, i, entries...)
	s.Flush()
}
||||
|
||||
func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { |
||||
if pkg != "" { |
||||
w.WriteString(pkg + ": ") |
||||
} |
||||
str := fmt.Sprint(entries...) |
||||
endsInNL := strings.HasSuffix(str, "\n") |
||||
w.WriteString(str) |
||||
if !endsInNL { |
||||
w.WriteString("\n") |
||||
} |
||||
} |
||||
|
||||
// Flush writes any buffered output to the underlying io.Writer.
func (s *StringFormatter) Flush() {
	s.w.Flush()
}
||||
|
||||
// NewPrettyFormatter returns a Formatter that writes human-readable records
// with a local-time timestamp and level character to w. When debug is true,
// each record also includes the caller's file and line.
func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
	return &PrettyFormatter{
		w:     bufio.NewWriter(w),
		debug: debug,
	}
}

// PrettyFormatter renders log records for human consumption.
type PrettyFormatter struct {
	w     *bufio.Writer // buffered sink; flushed after every Format call
	debug bool          // when true, emit " [file:line]" from runtime.Caller
}
||||
|
||||
// Format writes "<ts>.<us> [file:line] <level-char> | <pkg>: <entries>\n",
// where the "[file:line]" segment appears only in debug mode, and flushes.
// depth is the caller's frame offset as supplied by the logging front end.
func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
	now := time.Now()
	ts := now.Format("2006-01-02 15:04:05")
	c.w.WriteString(ts)
	// Sub-second precision: microseconds, zero-padded to six digits.
	ms := now.Nanosecond() / 1000
	c.w.WriteString(fmt.Sprintf(".%06d", ms))
	if c.debug {
		_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
		if !ok {
			// Caller information is unavailable; use placeholders.
			file = "???"
			line = 1
		} else {
			// Keep only the base filename for brevity.
			slash := strings.LastIndex(file, "/")
			if slash >= 0 {
				file = file[slash+1:]
			}
		}
		if line < 0 {
			line = 0 // not a real line number
		}
		c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
	}
	c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
	writeEntries(c.w, pkg, l, depth, entries...)
	c.Flush()
}
||||
|
||||
// Flush writes any buffered output to the underlying io.Writer.
func (c *PrettyFormatter) Flush() {
	c.w.Flush()
}
||||
|
||||
// LogFormatter emulates the form of the traditional built-in logger.
type LogFormatter struct {
	logger *log.Logger // does the actual formatting and writing
	prefix string      // prepended to every record at Format time
}

// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the
// golang log package to actually do the logging work so that logs look similar.
func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter {
	return &LogFormatter{
		logger: log.New(w, "", flag), // don't use prefix here
		prefix: prefix,               // save it instead
	}
}
||||
|
||||
// Format builds a log message for the LogFormatter. The LogLevel is ignored.
|
||||
func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) { |
||||
str := fmt.Sprint(entries...) |
||||
prefix := lf.prefix |
||||
if pkg != "" { |
||||
prefix = fmt.Sprintf("%s%s: ", prefix, pkg) |
||||
} |
||||
lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
|
||||
} |
||||
|
||||
// Flush is included so that the interface is complete, but is a no-op.
func (lf *LogFormatter) Flush() {
	// noop: the embedded log.Logger writes unbuffered.
}
||||
|
||||
// NilFormatter is a no-op log formatter that does nothing.
type NilFormatter struct {
}

// NewNilFormatter is a helper to produce a new NilFormatter struct. It logs no
// messages so that you can cause part of your logging to be silent.
func NewNilFormatter() Formatter {
	return &NilFormatter{}
}

// Format does nothing.
func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
	// noop
}

// Flush is included so that the interface is complete, but is a no-op.
func (_ *NilFormatter) Flush() {
	// noop
}
||||
@ -0,0 +1,96 @@ |
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog |
||||
|
||||
import ( |
||||
"bufio" |
||||
"bytes" |
||||
"io" |
||||
"os" |
||||
"runtime" |
||||
"strconv" |
||||
"strings" |
||||
"time" |
||||
) |
||||
|
||||
var pid = os.Getpid() |
||||
|
||||
// GlogFormatter renders records with a google/glog-style header followed by
// the plain-text body produced by the embedded StringFormatter.
type GlogFormatter struct {
	StringFormatter
}

// NewGlogFormatter returns a GlogFormatter writing to w.
func NewGlogFormatter(w io.Writer) *GlogFormatter {
	g := &GlogFormatter{}
	g.w = bufio.NewWriter(w)
	return g
}

// Format writes the glog header and then delegates to StringFormatter.Format.
// depth+1 accounts for this extra frame between the caller and runtime.Caller.
func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
	g.w.Write(GlogHeader(level, depth+1))
	g.StringFormatter.Format(pkg, level, depth+1, entries...)
}
||||
|
||||
func GlogHeader(level LogLevel, depth int) []byte { |
||||
// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
|
||||
now := time.Now().UTC() |
||||
_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
|
||||
if !ok { |
||||
file = "???" |
||||
line = 1 |
||||
} else { |
||||
slash := strings.LastIndex(file, "/") |
||||
if slash >= 0 { |
||||
file = file[slash+1:] |
||||
} |
||||
} |
||||
if line < 0 { |
||||
line = 0 // not a real line number
|
||||
} |
||||
buf := &bytes.Buffer{} |
||||
buf.Grow(30) |
||||
_, month, day := now.Date() |
||||
hour, minute, second := now.Clock() |
||||
buf.WriteString(level.Char()) |
||||
twoDigits(buf, int(month)) |
||||
twoDigits(buf, day) |
||||
buf.WriteByte(' ') |
||||
twoDigits(buf, hour) |
||||
buf.WriteByte(':') |
||||
twoDigits(buf, minute) |
||||
buf.WriteByte(':') |
||||
twoDigits(buf, second) |
||||
buf.WriteByte('.') |
||||
buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000)) |
||||
buf.WriteByte('Z') |
||||
buf.WriteByte(' ') |
||||
buf.WriteString(strconv.Itoa(pid)) |
||||
buf.WriteByte(' ') |
||||
buf.WriteString(file) |
||||
buf.WriteByte(':') |
||||
buf.WriteString(strconv.Itoa(line)) |
||||
buf.WriteByte(']') |
||||
buf.WriteByte(' ') |
||||
return buf.Bytes() |
||||
} |
||||
|
||||
const digits = "0123456789" |
||||
|
||||
func twoDigits(b *bytes.Buffer, d int) { |
||||
c2 := digits[d%10] |
||||
d /= 10 |
||||
c1 := digits[d%10] |
||||
b.WriteByte(c1) |
||||
b.WriteByte(c2) |
||||
} |
||||
@ -0,0 +1,49 @@ |
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// +build !windows
|
||||
|
||||
package capnslog |
||||
|
||||
import ( |
||||
"io" |
||||
"os" |
||||
"syscall" |
||||
) |
||||
|
||||
// Here's where the opinionation comes in. We need some sensible defaults,
// especially after taking over the log package. Your project (whatever it may
// be) may see things differently. That's okay; there should be no defaults in
// the main package that cannot be controlled or overridden programmatically,
// otherwise it's a bug. Doing so is creating your own init_log.go file much
// like this one.

func init() {
	initHijack()

	// Go `log` package uses os.Stderr.
	SetFormatter(NewDefaultFormatter(os.Stderr))
	SetGlobalLogLevel(INFO)
}

// NewDefaultFormatter picks a sensible default output: journald when the
// process appears to run under init/systemd (parent pid 1) and journald is
// usable, otherwise a non-debug PrettyFormatter on out.
func NewDefaultFormatter(out io.Writer) Formatter {
	if syscall.Getppid() == 1 {
		// We're running under init, which may be systemd.
		f, err := NewJournaldFormatter()
		if err == nil {
			return f
		}
		// journald unavailable; fall through to the plain formatter.
	}
	return NewPrettyFormatter(out, false)
}
||||
@ -0,0 +1,25 @@ |
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog |
||||
|
||||
import "os" |
||||
|
||||
// init wires this package in as the destination for the stdlib log package
// and sets Windows-appropriate defaults (no journald/syslog available).
func init() {
	initHijack()

	// Go `log` package uses os.Stderr.
	SetFormatter(NewPrettyFormatter(os.Stderr, false))
	SetGlobalLogLevel(INFO)
}
||||
@ -0,0 +1,68 @@ |
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// +build !windows
|
||||
|
||||
package capnslog |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"os" |
||||
"path/filepath" |
||||
|
||||
"github.com/coreos/go-systemd/journal" |
||||
) |
||||
|
||||
// NewJournaldFormatter returns a Formatter that sends records to the local
// systemd journal, or an error if no journal socket is reachable.
func NewJournaldFormatter() (Formatter, error) {
	if !journal.Enabled() {
		return nil, errors.New("No systemd detected")
	}
	return &journaldFormatter{}, nil
}

// journaldFormatter forwards records to systemd-journald; it holds no state.
type journaldFormatter struct{}
||||
|
||||
// Format maps the capnslog level onto a journald priority (TRACE collapses
// into PriDebug) and sends the rendered message with PACKAGE and
// SYSLOG_IDENTIFIER metadata tags. Send failures are reported to stderr
// rather than returned; panics only on a level outside the known set.
func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
	var pri journal.Priority
	switch l {
	case CRITICAL:
		pri = journal.PriCrit
	case ERROR:
		pri = journal.PriErr
	case WARNING:
		pri = journal.PriWarning
	case NOTICE:
		pri = journal.PriNotice
	case INFO:
		pri = journal.PriInfo
	case DEBUG:
		pri = journal.PriDebug
	case TRACE:
		pri = journal.PriDebug
	default:
		panic("Unhandled loglevel")
	}
	msg := fmt.Sprint(entries...)
	tags := map[string]string{
		"PACKAGE":           pkg,
		"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
	}
	err := journal.Send(msg, pri, tags)
	if err != nil {
		// Best effort: surface the failure without aborting the program.
		fmt.Fprintln(os.Stderr, err)
	}
}

// Flush is a no-op; journal.Send is synchronous.
func (j *journaldFormatter) Flush() {}
||||
@ -0,0 +1,39 @@ |
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog |
||||
|
||||
import ( |
||||
"log" |
||||
) |
||||
|
||||
// initHijack redirects the stdlib "log" package into capnslog by installing
// a packageWriter as its output, registered under the repo/pkg name "log".
func initHijack() {
	pkg := NewPackageLogger("log", "")
	w := packageWriter{pkg}
	log.SetFlags(0)
	log.SetPrefix("")
	log.SetOutput(w)
}

// packageWriter adapts a PackageLogger to io.Writer for stdlib log hijacking.
type packageWriter struct {
	pl *PackageLogger
}

// Write forwards b as an INFO record, or drops it (reporting success) when
// the logger's level is below INFO. calldepth+2 skips this adapter and the
// stdlib log frame so file:line formatters point at the user's call.
func (p packageWriter) Write(b []byte) (int, error) {
	if p.pl.level < INFO {
		return 0, nil
	}
	p.pl.internalLog(calldepth+2, INFO, string(b))
	return len(b), nil
}
||||
@ -0,0 +1,245 @@ |
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog |
||||
|
||||
import ( |
||||
"errors" |
||||
"strings" |
||||
"sync" |
||||
) |
||||
|
||||
// LogLevel is the set of all log levels. Values are ordered: CRITICAL (-1)
// is the most severe and TRACE (5) the most verbose; a message is emitted
// when its level is <= the configured level.
type LogLevel int8

const (
	// CRITICAL is the lowest log level; only errors which will end the program will be propagated.
	CRITICAL LogLevel = iota - 1
	// ERROR is for errors that are not fatal but lead to troubling behavior.
	ERROR
	// WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations.
	WARNING
	// NOTICE is for normal but significant conditions.
	NOTICE
	// INFO is a log level for common, everyday log updates.
	INFO
	// DEBUG is the default hidden level for more verbose updates about internal processes.
	DEBUG
	// TRACE is for (potentially) call by call tracing of programs.
	TRACE
)
||||
|
||||
// Char returns a single-character representation of the log level
// (e.g. "C" for CRITICAL). Panics on a value outside the defined levels.
func (l LogLevel) Char() string {
	switch l {
	case CRITICAL:
		return "C"
	case ERROR:
		return "E"
	case WARNING:
		return "W"
	case NOTICE:
		return "N"
	case INFO:
		return "I"
	case DEBUG:
		return "D"
	case TRACE:
		return "T"
	default:
		panic("Unhandled loglevel")
	}
}
||||
|
||||
// String returns a multi-character representation of the log level
// (e.g. "CRITICAL"). Panics on a value outside the defined levels.
func (l LogLevel) String() string {
	switch l {
	case CRITICAL:
		return "CRITICAL"
	case ERROR:
		return "ERROR"
	case WARNING:
		return "WARNING"
	case NOTICE:
		return "NOTICE"
	case INFO:
		return "INFO"
	case DEBUG:
		return "DEBUG"
	case TRACE:
		return "TRACE"
	default:
		panic("Unhandled loglevel")
	}
}
||||
|
||||
// Set updates the level from the given string value (via ParseLevel).
// Fulfills the flag.Value interface; on parse failure the level is unchanged.
func (l *LogLevel) Set(s string) error {
	value, err := ParseLevel(s)
	if err != nil {
		return err
	}

	*l = value
	return nil
}

// Type returns an empty string; only here to fulfill the pflag.Value interface.
func (l *LogLevel) Type() string {
	return ""
}
||||
|
||||
// ParseLevel translates some potential loglevel strings into their corresponding levels.
|
||||
func ParseLevel(s string) (LogLevel, error) { |
||||
switch s { |
||||
case "CRITICAL", "C": |
||||
return CRITICAL, nil |
||||
case "ERROR", "0", "E": |
||||
return ERROR, nil |
||||
case "WARNING", "1", "W": |
||||
return WARNING, nil |
||||
case "NOTICE", "2", "N": |
||||
return NOTICE, nil |
||||
case "INFO", "3", "I": |
||||
return INFO, nil |
||||
case "DEBUG", "4", "D": |
||||
return DEBUG, nil |
||||
case "TRACE", "5", "T": |
||||
return TRACE, nil |
||||
} |
||||
return CRITICAL, errors.New("couldn't parse log level " + s) |
||||
} |
||||
|
||||
// RepoLogger maps package names within one repository to their loggers.
type RepoLogger map[string]*PackageLogger

// loggerStruct is the package-global registry of repositories, their
// package loggers, and the single shared output Formatter. The embedded
// mutex guards both maps and the formatter.
type loggerStruct struct {
	sync.Mutex
	repoMap   map[string]RepoLogger
	formatter Formatter
}

// logger is the global logger
var logger = new(loggerStruct)
||||
|
||||
// SetGlobalLogLevel sets the log level for all packages in all repositories
// registered with capnslog.
func SetGlobalLogLevel(l LogLevel) {
	logger.Lock()
	defer logger.Unlock()
	for _, r := range logger.repoMap {
		r.setRepoLogLevelInternal(l)
	}
}

// GetRepoLogger may return the handle to the repository's set of packages' loggers.
// It errors when no package from that repository has been registered yet.
func GetRepoLogger(repo string) (RepoLogger, error) {
	logger.Lock()
	defer logger.Unlock()
	r, ok := logger.repoMap[repo]
	if !ok {
		return nil, errors.New("no packages registered for repo " + repo)
	}
	return r, nil
}

// MustRepoLogger returns the handle to the repository's packages' loggers,
// panicking if the repository is unknown.
func MustRepoLogger(repo string) RepoLogger {
	r, err := GetRepoLogger(repo)
	if err != nil {
		panic(err)
	}
	return r
}
||||
|
||||
// SetRepoLogLevel sets the log level for all packages in the repository.
func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
	logger.Lock()
	defer logger.Unlock()
	r.setRepoLogLevelInternal(l)
}

// setRepoLogLevelInternal applies l to every logger in r; callers must
// already hold logger's lock.
func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
	for _, v := range r {
		v.level = l
	}
}
||||
|
||||
// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
|
||||
// order, and returns a map of the results, for use in SetLogLevel.
|
||||
func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { |
||||
setlist := strings.Split(conf, ",") |
||||
out := make(map[string]LogLevel) |
||||
for _, setstring := range setlist { |
||||
setting := strings.Split(setstring, "=") |
||||
if len(setting) != 2 { |
||||
return nil, errors.New("oddly structured `pkg=level` option: " + setstring) |
||||
} |
||||
l, err := ParseLevel(setting[1]) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
out[setting[0]] = l |
||||
} |
||||
return out, nil |
||||
} |
||||
|
||||
// SetLogLevel takes a map of package names within a repository to their desired
// loglevel, and sets the levels appropriately. Unknown packages are ignored.
// "*" is a special package name that corresponds to all packages, and will be
// processed first.
func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
	logger.Lock()
	defer logger.Unlock()
	// Apply the wildcard first so specific entries in m can override it.
	if l, ok := m["*"]; ok {
		r.setRepoLogLevelInternal(l)
	}
	for k, v := range m {
		l, ok := r[k]
		if !ok {
			continue // unknown package: ignored by contract
		}
		l.level = v
	}
}

// SetFormatter sets the formatting function for all logs.
func SetFormatter(f Formatter) {
	logger.Lock()
	defer logger.Unlock()
	logger.formatter = f
}
||||
|
||||
// NewPackageLogger creates a package logger object.
|
||||
// This should be defined as a global var in your package, referencing your repo.
|
||||
func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { |
||||
logger.Lock() |
||||
defer logger.Unlock() |
||||
if logger.repoMap == nil { |
||||
logger.repoMap = make(map[string]RepoLogger) |
||||
} |
||||
r, rok := logger.repoMap[repo] |
||||
if !rok { |
||||
logger.repoMap[repo] = make(RepoLogger) |
||||
r = logger.repoMap[repo] |
||||
} |
||||
p, pok := r[pkg] |
||||
if !pok { |
||||
r[pkg] = &PackageLogger{ |
||||
pkg: pkg, |
||||
level: INFO, |
||||
} |
||||
p = r[pkg] |
||||
} |
||||
return |
||||
} |
||||
@ -0,0 +1,191 @@ |
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog |
||||
|
||||
import ( |
||||
"fmt" |
||||
"os" |
||||
) |
||||
|
||||
// PackageLogger is the per-package logging handle: a package name plus its
// current threshold level. Level access is guarded by the global logger lock
// in most paths (but see the unlocked fast-path reads in Debug*/Trace*).
type PackageLogger struct {
	pkg   string
	level LogLevel
}

// calldepth is the base number of frames between a PackageLogger public
// method and its caller, passed down so formatters can locate file:line.
const calldepth = 2

// internalLog emits entries at inLevel through the global formatter, unless
// filtered by p.level. CRITICAL always passes regardless of level. depth+1
// accounts for this function's own frame.
func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
	logger.Lock()
	defer logger.Unlock()
	if inLevel != CRITICAL && p.level < inLevel {
		return
	}
	if logger.formatter != nil {
		logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
	}
}
||||
|
||||
// SetLevel allows users to change the current logging level.
func (p *PackageLogger) SetLevel(l LogLevel) {
	logger.Lock()
	defer logger.Unlock()
	p.level = l
}

// LevelAt checks if the given log level will be outputted under current setting.
func (p *PackageLogger) LevelAt(l LogLevel) bool {
	logger.Lock()
	defer logger.Unlock()
	return p.level >= l
}
||||
|
||||
// Logf logs a formatted string at any level between ERROR and TRACE.
func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
	p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
}

// Log logs a message at any level between ERROR and TRACE.
func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
	p.internalLog(calldepth, l, fmt.Sprint(args...))
}

// log stdlib compatibility

// Println logs at INFO, rendering args in the style of fmt.Sprintln.
func (p *PackageLogger) Println(args ...interface{}) {
	p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
}

// Printf logs a formatted string at INFO.
func (p *PackageLogger) Printf(format string, args ...interface{}) {
	p.Logf(INFO, format, args...)
}

// Print logs at INFO, rendering args in the style of fmt.Sprint.
func (p *PackageLogger) Print(args ...interface{}) {
	p.internalLog(calldepth, INFO, fmt.Sprint(args...))
}
||||
|
||||
// Panic and fatal

// Panicf logs a formatted message at CRITICAL, then panics with it.
func (p *PackageLogger) Panicf(format string, args ...interface{}) {
	s := fmt.Sprintf(format, args...)
	p.internalLog(calldepth, CRITICAL, s)
	panic(s)
}

// Panic logs at CRITICAL (fmt.Sprint style), then panics with the message.
func (p *PackageLogger) Panic(args ...interface{}) {
	s := fmt.Sprint(args...)
	p.internalLog(calldepth, CRITICAL, s)
	panic(s)
}

// Panicln logs at CRITICAL (fmt.Sprintln style), then panics with the message.
func (p *PackageLogger) Panicln(args ...interface{}) {
	s := fmt.Sprintln(args...)
	p.internalLog(calldepth, CRITICAL, s)
	panic(s)
}

// Fatalf logs a formatted message at CRITICAL, then exits with status 1.
// Note: os.Exit skips deferred functions.
func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
	p.Logf(CRITICAL, format, args...)
	os.Exit(1)
}

// Fatal logs at CRITICAL (fmt.Sprint style), then exits with status 1.
func (p *PackageLogger) Fatal(args ...interface{}) {
	s := fmt.Sprint(args...)
	p.internalLog(calldepth, CRITICAL, s)
	os.Exit(1)
}

// Fatalln logs at CRITICAL (fmt.Sprintln style), then exits with status 1.
func (p *PackageLogger) Fatalln(args ...interface{}) {
	s := fmt.Sprintln(args...)
	p.internalLog(calldepth, CRITICAL, s)
	os.Exit(1)
}
||||
|
||||
// Error Functions

// Errorf logs a formatted message at ERROR.
func (p *PackageLogger) Errorf(format string, args ...interface{}) {
	p.Logf(ERROR, format, args...)
}

// Error logs entries at ERROR.
func (p *PackageLogger) Error(entries ...interface{}) {
	p.internalLog(calldepth, ERROR, entries...)
}

// Warning Functions

// Warningf logs a formatted message at WARNING.
func (p *PackageLogger) Warningf(format string, args ...interface{}) {
	p.Logf(WARNING, format, args...)
}

// Warning logs entries at WARNING.
func (p *PackageLogger) Warning(entries ...interface{}) {
	p.internalLog(calldepth, WARNING, entries...)
}

// Notice Functions

// Noticef logs a formatted message at NOTICE.
func (p *PackageLogger) Noticef(format string, args ...interface{}) {
	p.Logf(NOTICE, format, args...)
}

// Notice logs entries at NOTICE.
func (p *PackageLogger) Notice(entries ...interface{}) {
	p.internalLog(calldepth, NOTICE, entries...)
}

// Info Functions

// Infof logs a formatted message at INFO.
func (p *PackageLogger) Infof(format string, args ...interface{}) {
	p.Logf(INFO, format, args...)
}

// Info logs entries at INFO.
func (p *PackageLogger) Info(entries ...interface{}) {
	p.internalLog(calldepth, INFO, entries...)
}
||||
|
||||
// Debug Functions

// Debugf logs a formatted message at DEBUG.
// NOTE(review): the early p.level check here reads level without taking the
// logger lock (internalLog re-checks under the lock); it appears to be a
// fast-path to skip the Sprintf cost — confirm against upstream intent.
func (p *PackageLogger) Debugf(format string, args ...interface{}) {
	if p.level < DEBUG {
		return
	}
	p.Logf(DEBUG, format, args...)
}

// Debug logs entries at DEBUG, with the same unlocked fast-path check.
func (p *PackageLogger) Debug(entries ...interface{}) {
	if p.level < DEBUG {
		return
	}
	p.internalLog(calldepth, DEBUG, entries...)
}

// Trace Functions

// Tracef logs a formatted message at TRACE (unlocked fast-path check as above).
func (p *PackageLogger) Tracef(format string, args ...interface{}) {
	if p.level < TRACE {
		return
	}
	p.Logf(TRACE, format, args...)
}

// Trace logs entries at TRACE (unlocked fast-path check as above).
func (p *PackageLogger) Trace(entries ...interface{}) {
	if p.level < TRACE {
		return
	}
	p.internalLog(calldepth, TRACE, entries...)
}

// Flush flushes the global formatter's buffered output.
func (p *PackageLogger) Flush() {
	logger.Lock()
	defer logger.Unlock()
	logger.formatter.Flush()
}
||||
@ -0,0 +1,65 @@ |
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// +build !windows
|
||||
|
||||
package capnslog |
||||
|
||||
import ( |
||||
"fmt" |
||||
"log/syslog" |
||||
) |
||||
|
||||
// NewSyslogFormatter wraps an existing syslog.Writer as a Formatter.
func NewSyslogFormatter(w *syslog.Writer) Formatter {
	return &syslogFormatter{w}
}

// NewDefaultSyslogFormatter connects to the local syslog daemon at LOG_DEBUG
// with the given tag and returns a Formatter over that connection.
func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
	w, err := syslog.New(syslog.LOG_DEBUG, tag)
	if err != nil {
		return nil, err
	}
	return NewSyslogFormatter(w), nil
}

// syslogFormatter forwards records to a syslog.Writer.
type syslogFormatter struct {
	w *syslog.Writer
}
||||
|
||||
// Format sends each entry as its own syslog message at the severity method
// matching the capnslog level (TRACE collapses into Debug). The pkg and
// depth arguments are not rendered. Panics on an unknown level; errors from
// the syslog writer are ignored here.
func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
	for _, entry := range entries {
		str := fmt.Sprint(entry)
		switch l {
		case CRITICAL:
			s.w.Crit(str)
		case ERROR:
			s.w.Err(str)
		case WARNING:
			s.w.Warning(str)
		case NOTICE:
			s.w.Notice(str)
		case INFO:
			s.w.Info(str)
		case DEBUG:
			s.w.Debug(str)
		case TRACE:
			s.w.Debug(str)
		default:
			panic("Unhandled loglevel")
		}
	}
}

// Flush is a no-op; each record is written synchronously by the syslog writer.
func (s *syslogFormatter) Flush() {
}
||||
131
vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client_selector.go
generated
vendored
131
vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client_selector.go
generated
vendored
@ -0,0 +1,131 @@ |
||||
package cache |
||||
|
||||
import ( |
||||
"net" |
||||
"strings" |
||||
"sync" |
||||
|
||||
"github.com/bradfitz/gomemcache/memcache" |
||||
"github.com/cespare/xxhash" |
||||
"github.com/facette/natsort" |
||||
) |
||||
|
||||
// MemcachedJumpHashSelector implements the memcache.ServerSelector
// interface. MemcachedJumpHashSelector utilizes a jump hash to
// distribute keys to servers.
//
// While adding or removing servers only requires 1/N keys to move,
// servers are treated as a stack and can only be pushed/popped.
// Therefore, MemcachedJumpHashSelector works best for servers
// with consistent DNS names where the naturally sorted order
// is predictable.
type MemcachedJumpHashSelector struct {
	mu    sync.RWMutex // guards addrs
	addrs []net.Addr   // resolved servers in natural sort order
}
|
||||
// staticAddr caches the Network() and String() values from
|
||||
// any net.Addr.
|
||||
//
|
||||
// Copied from github.com/bradfitz/gomemcache/selector.go.
|
||||
type staticAddr struct { |
||||
network, str string |
||||
} |
||||
|
||||
func newStaticAddr(a net.Addr) net.Addr { |
||||
return &staticAddr{ |
||||
network: a.Network(), |
||||
str: a.String(), |
||||
} |
||||
} |
||||
|
||||
func (a *staticAddr) Network() string { return a.network } |
||||
func (a *staticAddr) String() string { return a.str } |
||||
|
||||
// SetServers changes a MemcachedJumpHashSelector's set of servers at
// runtime and is safe for concurrent use by multiple goroutines.
//
// Each server is given equal weight. A server is given more weight
// if it's listed multiple times.
//
// SetServers returns an error if any of the server names fail to
// resolve. No attempt is made to connect to the server. If any
// error occurs, no changes are made to the internal server list.
//
// To minimize the number of rehashes for keys when scaling the
// number of servers in subsequent calls to SetServers, servers
// are stored in natural sort order.
func (s *MemcachedJumpHashSelector) SetServers(servers ...string) error {
	// Work on a copy so the caller's slice is never reordered.
	sortedServers := make([]string, len(servers))
	copy(sortedServers, servers)
	natsort.Sort(sortedServers)

	// Resolve everything before taking the lock, so a failure leaves
	// the current address list untouched.
	naddrs := make([]net.Addr, len(sortedServers))
	for i, server := range sortedServers {
		if strings.Contains(server, "/") {
			// A "/" marks a unix socket path rather than host:port.
			addr, err := net.ResolveUnixAddr("unix", server)
			if err != nil {
				return err
			}
			naddrs[i] = newStaticAddr(addr)
		} else {
			tcpAddr, err := net.ResolveTCPAddr("tcp", server)
			if err != nil {
				return err
			}
			naddrs[i] = newStaticAddr(tcpAddr)
		}
	}

	s.mu.Lock()
	defer s.mu.Unlock()
	s.addrs = naddrs
	return nil
}
||||
|
||||
// jumpHash consistently chooses a hash bucket number in the range [0, numBuckets) for the given key.
// numBuckets must be >= 1.
//
// Copied from github.com/dgryski/go-jump/blob/master/jump.go
func jumpHash(key uint64, numBuckets int) int32 {
	bucket, next := int64(-1), int64(0)
	for next < int64(numBuckets) {
		bucket = next
		// LCG step drives the pseudo-random jump sequence.
		key = key*2862933555777941757 + 1
		next = int64(float64(bucket+1) * (float64(int64(1)<<31) / float64((key>>33)+1)))
	}
	return int32(bucket)
}
||||
|
||||
// PickServer returns the server address that a given item
// should be sharded onto.
//
// Returns memcache.ErrNoServers when the server list is empty; with a
// single server the hash is skipped entirely.
func (s *MemcachedJumpHashSelector) PickServer(key string) (net.Addr, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if len(s.addrs) == 0 {
		return nil, memcache.ErrNoServers
	} else if len(s.addrs) == 1 {
		return s.addrs[0], nil
	}
	// Hash the key with xxhash, then map it onto one of the servers
	// with jump consistent hashing.
	cs := xxhash.Sum64String(key)
	idx := jumpHash(cs, len(s.addrs))
	return s.addrs[idx], nil
}
||||
|
||||
// Each iterates over each server and calls the given function.
|
||||
// If f returns a non-nil error, iteration will stop and that
|
||||
// error will be returned.
|
||||
func (s *MemcachedJumpHashSelector) Each(f func(net.Addr) error) error { |
||||
s.mu.RLock() |
||||
defer s.mu.RUnlock() |
||||
for _, def := range s.addrs { |
||||
if err := f(def); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
@ -0,0 +1,112 @@ |
||||
package kv |
||||
|
||||
import ( |
||||
"context" |
||||
"flag" |
||||
"fmt" |
||||
"sync" |
||||
|
||||
"github.com/cortexproject/cortex/pkg/ring/kv/codec" |
||||
"github.com/cortexproject/cortex/pkg/ring/kv/consul" |
||||
"github.com/cortexproject/cortex/pkg/ring/kv/etcd" |
||||
) |
||||
|
||||
// The in-memory client created inside NewClient is a singleton, so
// that distributors and ingesters started in the same process can
// find themselves.
var inmemoryStoreInit sync.Once
var inmemoryStore Client

// Config is config for a KVStore currently used by ring and HA tracker,
// where store can be consul or inmemory.
type Config struct {
	Store  string        `yaml:"store,omitempty"`
	Consul consul.Config `yaml:"consul,omitempty"`
	Etcd   etcd.Config   `yaml:"etcd,omitempty"`
	Prefix string        `yaml:"prefix,omitempty"`

	// Mock, when non-nil, is returned verbatim by NewClient — intended
	// for injecting a fake store in tests.
	Mock Client
}
||||
|
||||
// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet.
// If prefix is an empty string we will register consul flags with no prefix and the
// store flag with the prefix ring, so ring.store. For everything else we pass the prefix
// to the Consul flags.
// If prefix is not an empty string it should end with a period.
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
	// We need Consul flags to not have the ring prefix to maintain compatibility.
	// This needs to be fixed in the future (1.0 release maybe?) when we normalize flags.
	// At the moment we have consul.<flag-name>, and ring.store, going forward it would
	// be easier to have everything under ring, so ring.consul.<flag-name>
	cfg.Consul.RegisterFlags(f, prefix)
	cfg.Etcd.RegisterFlagsWithPrefix(f, prefix)
	// Note the consul/etcd flags above see the ORIGINAL (possibly empty)
	// prefix; only the prefix/store flags below default to "ring.".
	if prefix == "" {
		prefix = "ring."
	}
	f.StringVar(&cfg.Prefix, prefix+"prefix", "collectors/", "The prefix for the keys in the store. Should end with a /.")
	f.StringVar(&cfg.Store, prefix+"store", "consul", "Backend storage to use for the ring (consul, etcd, inmemory).")
}
||||
|
||||
// Client is a high-level client for key-value stores (such as Etcd and
// Consul) that exposes operations such as CAS and Watch which take callbacks.
// It also deals with serialisation by using a Codec and having an instance of
// the desired type passed in to methods ala json.Unmarshal.
type Client interface {
	// Get a specific key. Will use a codec to deserialise key to appropriate type.
	Get(ctx context.Context, key string) (interface{}, error)

	// CAS stands for Compare-And-Swap. Will call provided callback f with the
	// current value of the key and allow callback to return a different value.
	// Will then attempt to atomically swap the current value for the new value.
	// If that doesn't succeed will try again - callback will be called again
	// with new value etc. Guarantees that only a single concurrent CAS
	// succeeds. Callback can return nil to indicate it is happy with existing
	// value.
	CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error

	// WatchKey calls f whenever the value stored under key changes.
	WatchKey(ctx context.Context, key string, f func(interface{}) bool)

	// WatchPrefix calls f whenever any value stored under prefix changes.
	WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool)
}
||||
|
||||
// NewClient creates a new Client (consul, etcd or inmemory) based on the config,
|
||||
// encodes and decodes data for storage using the codec.
|
||||
func NewClient(cfg Config, codec codec.Codec) (Client, error) { |
||||
if cfg.Mock != nil { |
||||
return cfg.Mock, nil |
||||
} |
||||
|
||||
var client Client |
||||
var err error |
||||
|
||||
switch cfg.Store { |
||||
case "consul": |
||||
client, err = consul.NewClient(cfg.Consul, codec) |
||||
|
||||
case "etcd": |
||||
client, err = etcd.New(cfg.Etcd, codec) |
||||
|
||||
case "inmemory": |
||||
// If we use the in-memory store, make sure everyone gets the same instance
|
||||
// within the same process.
|
||||
inmemoryStoreInit.Do(func() { |
||||
inmemoryStore = consul.NewInMemoryClient(codec) |
||||
}) |
||||
client = inmemoryStore |
||||
|
||||
default: |
||||
return nil, fmt.Errorf("invalid KV store type: %s", cfg.Store) |
||||
} |
||||
|
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if cfg.Prefix != "" { |
||||
client = PrefixClient(client, cfg.Prefix) |
||||
} |
||||
|
||||
return metrics{client}, nil |
||||
} |
||||
@ -0,0 +1,52 @@ |
||||
package codec |
||||
|
||||
import ( |
||||
"github.com/golang/protobuf/proto" |
||||
"github.com/golang/snappy" |
||||
) |
||||
|
||||
// Codec allows KV clients to serialise and deserialise values.
type Codec interface {
	// Decode turns raw bytes read from the store into a value.
	Decode([]byte) (interface{}, error)
	// Encode turns a value into the raw bytes to be stored.
	Encode(interface{}) ([]byte, error)
}
||||
|
||||
// Proto is a Codec for proto/snappy
|
||||
type Proto struct { |
||||
Factory func() proto.Message |
||||
} |
||||
|
||||
// Decode implements Codec
|
||||
func (p Proto) Decode(bytes []byte) (interface{}, error) { |
||||
out := p.Factory() |
||||
bytes, err := snappy.Decode(nil, bytes) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
if err := proto.Unmarshal(bytes, out); err != nil { |
||||
return nil, err |
||||
} |
||||
return out, nil |
||||
} |
||||
|
||||
// Encode implements Codec
|
||||
func (p Proto) Encode(msg interface{}) ([]byte, error) { |
||||
bytes, err := proto.Marshal(msg.(proto.Message)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return snappy.Encode(nil, bytes), nil |
||||
} |
||||
|
||||
// String is a Codec for plain strings: values are stored as their
// raw bytes, with no compression or framing.
type String struct{}

// Decode implements Codec.
func (String) Decode(bytes []byte) (interface{}, error) {
	s := string(bytes)
	return s, nil
}

// Encode implements Codec.
func (String) Encode(msg interface{}) ([]byte, error) {
	s := msg.(string)
	return []byte(s), nil
}
||||
@ -1,4 +1,4 @@ |
||||
package ring |
||||
package consul |
||||
|
||||
import ( |
||||
"context" |
||||
@ -0,0 +1,196 @@ |
||||
package etcd |
||||
|
||||
import ( |
||||
"context" |
||||
"flag" |
||||
"fmt" |
||||
"time" |
||||
|
||||
"github.com/go-kit/kit/log/level" |
||||
"go.etcd.io/etcd/clientv3" |
||||
|
||||
"github.com/cortexproject/cortex/pkg/ring/kv/codec" |
||||
"github.com/cortexproject/cortex/pkg/util" |
||||
"github.com/cortexproject/cortex/pkg/util/flagext" |
||||
) |
||||
|
||||
// Config for a new etcd.Client.
type Config struct {
	Endpoints   []string      `yaml:"endpoints"`
	DialTimeout time.Duration `yaml:"dial_timeout"`
	MaxRetries  int           `yaml:"max_retries"` // retry budget used by CAS
}

// Client implements kv.Client for etcd.
type Client struct {
	cfg   Config
	codec codec.Codec      // serialises/deserialises stored values
	cli   *clientv3.Client // underlying etcd v3 client
}
||||
|
||||
// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet.
func (cfg *Config) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
	// Start from an empty slice; flagext.Strings presumably appends one
	// endpoint per occurrence of the flag — see the flagext package.
	cfg.Endpoints = []string{}
	f.Var((*flagext.Strings)(&cfg.Endpoints), prefix+"etcd.endpoints", "The etcd endpoints to connect to.")
	f.DurationVar(&cfg.DialTimeout, prefix+"etcd.dial-timeout", 10*time.Second, "The dial timeout for the etcd connection.")
	f.IntVar(&cfg.MaxRetries, prefix+"etcd.max-retries", 10, "The maximum number of retries to do for failed ops.")
}
||||
|
||||
// New makes a new Client.
//
// Only Endpoints and DialTimeout from cfg are passed through to the
// underlying clientv3 connection.
func New(cfg Config, codec codec.Codec) (*Client, error) {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   cfg.Endpoints,
		DialTimeout: cfg.DialTimeout,
	})
	if err != nil {
		return nil, err
	}

	return &Client{
		cfg:   cfg,
		codec: codec,
		cli:   cli,
	}, nil
}
||||
|
||||
// CAS implements kv.Client.
//
// Up to cfg.MaxRetries attempts are made; each attempt reads the key,
// hands the decoded value (or nil if absent) to f, and writes f's
// result back in a transaction guarded by the key's version, so only
// one concurrent CAS can succeed. Returns the last error seen, or a
// generic failure if all attempts exhausted without recording one.
func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
	var revision int64
	var lastErr error

	for i := 0; i < c.cfg.MaxRetries; i++ {
		resp, err := c.cli.Get(ctx, key)
		if err != nil {
			level.Error(util.Logger).Log("msg", "error getting key", "key", key, "err", err)
			lastErr = err
			continue
		}

		var intermediate interface{}
		if len(resp.Kvs) > 0 {
			intermediate, err = c.codec.Decode(resp.Kvs[0].Value)
			if err != nil {
				level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err)
				lastErr = err
				continue
			}
			// NOTE(review): revision is only updated when the key exists; if
			// the key disappears on a later attempt it keeps its previous
			// value — confirm this is intended.
			revision = resp.Kvs[0].Version
		}

		var retry bool
		intermediate, retry, err = f(intermediate)
		if err != nil {
			if !retry {
				return err
			}
			lastErr = err
			continue
		}

		// Callback returning nil means it doesn't want to CAS anymore.
		if intermediate == nil {
			return nil
		}

		buf, err := c.codec.Encode(intermediate)
		if err != nil {
			level.Error(util.Logger).Log("msg", "error serialising value", "key", key, "err", err)
			lastErr = err
			continue
		}

		// Write only if the key's version is still what we read above.
		result, err := c.cli.Txn(ctx).
			If(clientv3.Compare(clientv3.Version(key), "=", revision)).
			Then(clientv3.OpPut(key, string(buf))).
			Commit()
		if err != nil {
			level.Error(util.Logger).Log("msg", "error CASing", "key", key, "err", err)
			lastErr = err
			continue
		}
		// result is not Succeeded if the comparison was false, meaning if the modify indexes did not match.
		if !result.Succeeded {
			level.Debug(util.Logger).Log("msg", "failed to CAS, revision and version did not match in etcd", "key", key, "revision", revision)
			continue
		}

		return nil
	}

	if lastErr != nil {
		return lastErr
	}
	return fmt.Errorf("failed to CAS %s", key)
}
||||
|
||||
// WatchKey implements kv.Client.
//
// It re-establishes the watch whenever the watch channel closes, with a
// 1s–1m backoff between attempts, and returns when f returns false or
// the backoff gives up (presumably when ctx is cancelled — see
// util.Backoff.Ongoing).
func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
	backoff := util.NewBackoff(ctx, util.BackoffConfig{
		MinBackoff: 1 * time.Second,
		MaxBackoff: 1 * time.Minute,
	})
	for backoff.Ongoing() {
		watchChan := c.cli.Watch(ctx, key)
		for {
			resp, ok := <-watchChan
			if !ok {
				// Channel closed: back off, then re-watch.
				break
			}
			// Any successful delivery resets the backoff.
			backoff.Reset()

			for _, event := range resp.Events {
				out, err := c.codec.Decode(event.Kv.Value)
				if err != nil {
					level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err)
					continue
				}

				if !f(out) {
					return
				}
			}
		}
	}
}
||||
|
||||
// WatchPrefix implements kv.Client.
//
// Same structure as WatchKey but watches every key under the given
// prefix (the parameter is named key but is used as a prefix) and
// passes the full key of each event to f.
func (c *Client) WatchPrefix(ctx context.Context, key string, f func(string, interface{}) bool) {
	backoff := util.NewBackoff(ctx, util.BackoffConfig{
		MinBackoff: 1 * time.Second,
		MaxBackoff: 1 * time.Minute,
	})
	for backoff.Ongoing() {
		watchChan := c.cli.Watch(ctx, key, clientv3.WithPrefix())
		for {
			resp, ok := <-watchChan
			if !ok {
				// Channel closed: back off, then re-watch.
				break
			}
			backoff.Reset()

			for _, event := range resp.Events {
				out, err := c.codec.Decode(event.Kv.Value)
				if err != nil {
					level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err)
					continue
				}

				if !f(string(event.Kv.Key), out) {
					return
				}
			}
		}
	}
}
||||
|
||||
// Get implements kv.Client.
|
||||
func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { |
||||
resp, err := c.cli.Get(ctx, key) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
if len(resp.Kvs) != 1 { |
||||
return nil, fmt.Errorf("got %d kvs, expected 1", len(resp.Kvs)) |
||||
} |
||||
return c.codec.Decode(resp.Kvs[0].Value) |
||||
} |
||||
@ -0,0 +1,81 @@ |
||||
package etcd |
||||
|
||||
import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"net/url"
	"os"
	"time"

	"go.etcd.io/etcd/embed"
	"go.etcd.io/etcd/etcdserver/api/v3client"

	"github.com/cortexproject/cortex/pkg/ring/kv/codec"
	"github.com/cortexproject/cortex/pkg/util/flagext"
)
||||
|
||||
const etcdStartTimeout = 30 * time.Second |
||||
|
||||
// Mock returns a Mock Etcd client.
|
||||
// Inspired by https://github.com/ligato/cn-infra/blob/master/db/keyval/etcd/mocks/embeded_etcd.go.
|
||||
func Mock(codec codec.Codec) (*Client, io.Closer, error) { |
||||
dir, err := ioutil.TempDir("", "etcd") |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
|
||||
cfg := embed.NewConfig() |
||||
cfg.Dir = dir |
||||
lpurl, _ := url.Parse("http://localhost:0") |
||||
lcurl, _ := url.Parse("http://localhost:0") |
||||
cfg.LPUrls = []url.URL{*lpurl} |
||||
cfg.LCUrls = []url.URL{*lcurl} |
||||
|
||||
etcd, err := embed.StartEtcd(cfg) |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
|
||||
select { |
||||
case <-etcd.Server.ReadyNotify(): |
||||
case <-time.After(etcdStartTimeout): |
||||
etcd.Server.Stop() // trigger a shutdown
|
||||
return nil, nil, fmt.Errorf("server took too long to start") |
||||
} |
||||
|
||||
closer := CloserFunc(func() error { |
||||
etcd.Server.Stop() |
||||
return nil |
||||
}) |
||||
|
||||
var config Config |
||||
flagext.DefaultValues(&config) |
||||
|
||||
client := &Client{ |
||||
cfg: config, |
||||
codec: codec, |
||||
cli: v3client.New(etcd.Server), |
||||
} |
||||
|
||||
return client, closer, nil |
||||
} |
||||
|
||||
// CloserFunc is like http.HandlerFunc but for io.Closers.
type CloserFunc func() error

// Close implements io.Closer by invoking the wrapped function.
func (f CloserFunc) Close() error {
	return f()
}

// NopCloser does nothing.
var NopCloser = CloserFunc(func() error { return nil })
||||
|
||||
// RegisterFlags adds the flags required to config this to the given FlagSet.
// It is the no-prefix convenience wrapper around RegisterFlagsWithPrefix.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
	cfg.RegisterFlagsWithPrefix(f, "")
}
||||
@ -0,0 +1,53 @@ |
||||
package kv |
||||
|
||||
import ( |
||||
"context" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/weaveworks/common/instrument" |
||||
) |
||||
|
||||
// requestDuration observes the latency of every KV operation, labelled
// by operation name and status code.
var requestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{
	Namespace: "cortex",
	Name:      "kv_request_duration_seconds",
	Help:      "Time spent on consul requests.",
	Buckets:   prometheus.DefBuckets,
}, []string{"operation", "status_code"}))

func init() {
	// Register the histogram with the default registry at package load.
	requestDuration.Register()
}
||||
|
||||
// metrics decorates a Client, timing every call via the
// requestDuration histogram before delegating to the wrapped client.
type metrics struct {
	c Client
}

// Get implements Client, recorded under the "GET" operation label.
func (m metrics) Get(ctx context.Context, key string) (interface{}, error) {
	var result interface{}
	err := instrument.CollectedRequest(ctx, "GET", requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
		var err error
		result, err = m.c.Get(ctx, key)
		return err
	})
	return result, err
}

// CAS implements Client, recorded under the "CAS" operation label.
func (m metrics) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
	return instrument.CollectedRequest(ctx, "CAS", requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
		return m.c.CAS(ctx, key, f)
	})
}

// WatchKey implements Client. The instrumented func always returns nil
// and WatchKey itself returns nothing, so the collector's return value
// is discarded.
func (m metrics) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
	instrument.CollectedRequest(ctx, "WatchKey", requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
		m.c.WatchKey(ctx, key, f)
		return nil
	})
}

// WatchPrefix implements Client. As with WatchKey, the collector's
// return value is discarded.
func (m metrics) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
	instrument.CollectedRequest(ctx, "WatchPrefix", requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
		m.c.WatchPrefix(ctx, prefix, f)
		return nil
	})
}
||||
@ -0,0 +1,39 @@ |
||||
package kv |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"strings" |
||||
) |
||||
|
||||
// prefixedKVClient decorates a Client so every key it touches is
// transparently namespaced under prefix.
type prefixedKVClient struct {
	prefix string
	client Client
}

// PrefixClient takes a KVClient and forces a prefix on all its operations.
func PrefixClient(client Client, prefix string) Client {
	return &prefixedKVClient{prefix, client}
}

// CAS atomically modifies a value in a callback. If the value doesn't exist,
// you'll get 'nil' as an argument to your callback.
func (c *prefixedKVClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
	return c.client.CAS(ctx, c.prefix+key, f)
}

// WatchKey watches the prefixed key.
func (c *prefixedKVClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
	c.client.WatchKey(ctx, c.prefix+key, f)
}

// WatchPrefix watches a prefix. For a prefix client it appends the prefix argument to the clients prefix.
// Keys passed to f have the client's own prefix stripped back off.
func (c *prefixedKVClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
	c.client.WatchPrefix(ctx, fmt.Sprintf("%s%s", c.prefix, prefix), func(k string, i interface{}) bool {
		return f(strings.TrimPrefix(k, c.prefix), i)
	})
}

// Get fetches prefix+key from the underlying client.
func (c *prefixedKVClient) Get(ctx context.Context, key string) (interface{}, error) {
	return c.client.Get(ctx, c.prefix+key)
}
||||
@ -1,109 +0,0 @@ |
||||
package ring |
||||
|
||||
import ( |
||||
"context" |
||||
"flag" |
||||
"fmt" |
||||
"sync" |
||||
|
||||
"github.com/golang/protobuf/proto" |
||||
"github.com/golang/snappy" |
||||
) |
||||
|
||||
var inmemoryStoreInit sync.Once |
||||
var inmemoryStore KVClient |
||||
|
||||
// KVClient is a high-level client for Consul, that exposes operations
|
||||
// such as CAS and Watch which take callbacks. It also deals with serialisation
|
||||
// by having an instance factory passed in to methods and deserialising into that.
|
||||
type KVClient interface { |
||||
CAS(ctx context.Context, key string, f CASCallback) error |
||||
WatchKey(ctx context.Context, key string, f func(interface{}) bool) |
||||
WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) |
||||
Get(ctx context.Context, key string) (interface{}, error) |
||||
PutBytes(ctx context.Context, key string, buf []byte) error |
||||
} |
||||
|
||||
// KVConfig is config for a KVStore currently used by ring and HA tracker,
|
||||
// where store can be consul or inmemory.
|
||||
type KVConfig struct { |
||||
Store string `yaml:"store,omitempty"` |
||||
Consul ConsulConfig `yaml:"consul,omitempty"` |
||||
|
||||
Mock KVClient |
||||
} |
||||
|
||||
// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet.
|
||||
// If prefix is an empty string we will register consul flags with no prefix and the
|
||||
// store flag with the prefix ring, so ring.store. For everything else we pass the prefix
|
||||
// to the Consul flags.
|
||||
// If prefix is not an empty string it should end with a period.
|
||||
func (cfg *KVConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { |
||||
// We need Consul flags to not have the ring prefix to maintain compatibility.
|
||||
// This needs to be fixed in the future (1.0 release maybe?) when we normalize flags.
|
||||
// At the moment we have consul.<flag-name>, and ring.store, going forward it would
|
||||
// be easier to have everything under ring, so ring.consul.<flag-name>
|
||||
cfg.Consul.RegisterFlags(f, prefix) |
||||
if prefix == "" { |
||||
prefix = "ring." |
||||
} |
||||
f.StringVar(&cfg.Store, prefix+"store", "consul", "Backend storage to use for the ring (consul, inmemory).") |
||||
} |
||||
|
||||
// CASCallback is the type of the callback to CAS. If err is nil, out must be non-nil.
|
||||
type CASCallback func(in interface{}) (out interface{}, retry bool, err error) |
||||
|
||||
// NewKVStore creates a new KVstore client (inmemory or consul) based on the config,
|
||||
// encodes and decodes data for storage using the codec.
|
||||
func NewKVStore(cfg KVConfig, codec Codec) (KVClient, error) { |
||||
if cfg.Mock != nil { |
||||
return cfg.Mock, nil |
||||
} |
||||
|
||||
switch cfg.Store { |
||||
case "consul": |
||||
return NewConsulClient(cfg.Consul, codec) |
||||
case "inmemory": |
||||
// If we use the in-memory store, make sure everyone gets the same instance
|
||||
// within the same process.
|
||||
inmemoryStoreInit.Do(func() { |
||||
inmemoryStore = NewInMemoryKVClient(codec) |
||||
}) |
||||
return inmemoryStore, nil |
||||
default: |
||||
return nil, fmt.Errorf("invalid KV store type: %s", cfg.Store) |
||||
} |
||||
} |
||||
|
||||
// Codec allows the consul client to serialise and deserialise values.
|
||||
type Codec interface { |
||||
Decode([]byte) (interface{}, error) |
||||
Encode(interface{}) ([]byte, error) |
||||
} |
||||
|
||||
// ProtoCodec is a Codec for proto/snappy
|
||||
type ProtoCodec struct { |
||||
Factory func() proto.Message |
||||
} |
||||
|
||||
// Decode implements Codec
|
||||
func (p ProtoCodec) Decode(bytes []byte) (interface{}, error) { |
||||
out := p.Factory() |
||||
bytes, err := snappy.Decode(nil, bytes) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
if err := proto.Unmarshal(bytes, out); err != nil { |
||||
return nil, err |
||||
} |
||||
return out, nil |
||||
} |
||||
|
||||
// Encode implements Codec
|
||||
func (p ProtoCodec) Encode(msg interface{}) ([]byte, error) { |
||||
bytes, err := proto.Marshal(msg.(proto.Message)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return snappy.Encode(nil, bytes), nil |
||||
} |
||||
@ -0,0 +1,19 @@ |
||||
package flagext |
||||
|
||||
import ( |
||||
"strings" |
||||
) |
||||
|
||||
// StringSlice is a slice of strings that implements flag.Value
type StringSlice []string

// String implements flag.Value by joining the values with single spaces.
func (v StringSlice) String() string {
	return strings.Join([]string(v), " ")
}

// Set implements flag.Value; each invocation appends one more value.
func (v *StringSlice) Set(s string) error {
	*v = append(*v, s)
	return nil
}
||||
@ -1,120 +0,0 @@ |
||||
package util |
||||
|
||||
import ( |
||||
"hash/fnv" |
||||
"reflect" |
||||
"sync" |
||||
"sync/atomic" |
||||
"time" |
||||
"unsafe" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
) |
||||
|
||||
// HashBucketHistogramOpts are the options for making a HashBucketHistogram
|
||||
type HashBucketHistogramOpts struct { |
||||
prometheus.HistogramOpts |
||||
HashBuckets int |
||||
} |
||||
|
||||
// HashBucketHistogram is used to track a histogram of per-bucket rates.
|
||||
//
|
||||
// For instance, I want to know that 50% of rows are getting X QPS or lower
|
||||
// and 99% are getting Y QPS of lower. At first glance, this would involve
|
||||
// tracking write rate per row, and periodically sticking those numbers in
|
||||
// a histogram. To make this fit in memory: instead of per-row, we keep
|
||||
// N buckets of counters and hash the key to a bucket. Then every second
|
||||
// we update a histogram with the bucket values (and zero the buckets).
|
||||
//
|
||||
// Note, we want this metric to be relatively independent of the number of
|
||||
// hash buckets and QPS of the service - we're trying to measure how well
|
||||
// load balanced the write load is. So we normalise the values in the hash
|
||||
// buckets such that if all buckets are '1', then we have even load. We
|
||||
// do this by multiplying the number of ops per bucket by the number of
|
||||
// buckets, and dividing by the number of ops.
|
||||
type HashBucketHistogram interface { |
||||
prometheus.Metric |
||||
prometheus.Collector |
||||
|
||||
Observe(string, uint32) |
||||
Stop() |
||||
} |
||||
|
||||
type hashBucketHistogram struct { |
||||
prometheus.Histogram |
||||
mtx sync.RWMutex |
||||
buckets *hashBuckets |
||||
quit chan struct{} |
||||
opts HashBucketHistogramOpts |
||||
} |
||||
|
||||
type hashBuckets struct { |
||||
ops uint32 |
||||
buckets []uint32 |
||||
} |
||||
|
||||
// NewHashBucketHistogram makes a new HashBucketHistogram
|
||||
func NewHashBucketHistogram(opts HashBucketHistogramOpts) HashBucketHistogram { |
||||
result := &hashBucketHistogram{ |
||||
Histogram: prometheus.NewHistogram(opts.HistogramOpts), |
||||
quit: make(chan struct{}), |
||||
opts: opts, |
||||
} |
||||
result.swapBuckets() |
||||
go result.loop() |
||||
return result |
||||
} |
||||
|
||||
// Stop the background goroutine
|
||||
func (h *hashBucketHistogram) Stop() { |
||||
h.quit <- struct{}{} |
||||
} |
||||
|
||||
func (h *hashBucketHistogram) swapBuckets() *hashBuckets { |
||||
h.mtx.Lock() |
||||
buckets := h.buckets |
||||
h.buckets = &hashBuckets{ |
||||
buckets: make([]uint32, h.opts.HashBuckets, h.opts.HashBuckets), |
||||
} |
||||
h.mtx.Unlock() |
||||
return buckets |
||||
} |
||||
|
||||
func (h *hashBucketHistogram) loop() { |
||||
ticker := time.NewTicker(1 * time.Second) |
||||
defer ticker.Stop() |
||||
for { |
||||
select { |
||||
case <-ticker.C: |
||||
buckets := h.swapBuckets() |
||||
for _, v := range buckets.buckets { |
||||
if buckets.ops > 0 { |
||||
h.Histogram.Observe(float64(v) * float64(h.opts.HashBuckets) / float64(buckets.ops)) |
||||
} |
||||
} |
||||
case <-h.quit: |
||||
return |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Observe implements HashBucketHistogram
|
||||
func (h *hashBucketHistogram) Observe(key string, value uint32) { |
||||
h.mtx.RLock() |
||||
hash := fnv.New32() |
||||
hash.Write(bytesView(key)) |
||||
i := hash.Sum32() % uint32(h.opts.HashBuckets) |
||||
atomic.AddUint32(&h.buckets.ops, 1) |
||||
atomic.AddUint32(&h.buckets.buckets[i], value) |
||||
h.mtx.RUnlock() |
||||
} |
||||
|
||||
func bytesView(v string) []byte { |
||||
strHeader := (*reflect.StringHeader)(unsafe.Pointer(&v)) |
||||
bytesHeader := reflect.SliceHeader{ |
||||
Data: strHeader.Data, |
||||
Len: strHeader.Len, |
||||
Cap: strHeader.Len, |
||||
} |
||||
return *(*[]byte)(unsafe.Pointer(&bytesHeader)) |
||||
} |
||||
@ -0,0 +1,21 @@ |
||||
sudo: false |
||||
language: go |
||||
go: |
||||
- 1.3.x |
||||
- 1.5.x |
||||
- 1.6.x |
||||
- 1.7.x |
||||
- 1.8.x |
||||
- 1.9.x |
||||
- master |
||||
matrix: |
||||
allow_failures: |
||||
- go: master |
||||
fast_finish: true |
||||
install: |
||||
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). |
||||
script: |
||||
- go get -t -v ./... |
||||
- diff -u <(echo -n) <(gofmt -d -s .) |
||||
- go tool vet . |
||||
- go test -v -race ./... |
||||
@ -0,0 +1,21 @@ |
||||
Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net> |
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||
of this software and associated documentation files (the "Software"), to deal |
||||
in the Software without restriction, including without limitation the rights |
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||
copies of the Software, and to permit persons to whom the Software is |
||||
furnished to do so, subject to the following conditions: |
||||
|
||||
The above copyright notice and this permission notice shall be included in |
||||
all copies or substantial portions of the Software. |
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
||||
SOFTWARE. |
||||
|
||||
<http://www.opensource.org/licenses/mit-license.php> |
||||
@ -0,0 +1,124 @@ |
||||
# Humane Units [](https://travis-ci.org/dustin/go-humanize) [](https://godoc.org/github.com/dustin/go-humanize) |
||||
|
||||
Just a few functions for helping humanize times and sizes. |
||||
|
||||
`go get` it as `github.com/dustin/go-humanize`, import it as |
||||
`"github.com/dustin/go-humanize"`, use it as `humanize`. |
||||
|
||||
See [godoc](https://godoc.org/github.com/dustin/go-humanize) for |
||||
complete documentation. |
||||
|
||||
## Sizes |
||||
|
||||
This lets you take numbers like `82854982` and convert them to useful |
||||
strings like, `83 MB` or `79 MiB` (whichever you prefer). |
||||
|
||||
Example: |
||||
|
||||
```go |
||||
fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. |
||||
``` |
||||
|
||||
## Times |
||||
|
||||
This lets you take a `time.Time` and spit it out in relative terms. |
||||
For example, `12 seconds ago` or `3 days from now`. |
||||
|
||||
Example: |
||||
|
||||
```go |
||||
fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. |
||||
``` |
||||
|
||||
Thanks to Kyle Lemons for the time implementation from an IRC |
||||
conversation one day. It's pretty neat. |
||||
|
||||
## Ordinals |
||||
|
||||
From a [mailing list discussion][odisc] where a user wanted to be able |
||||
to label ordinals. |
||||
|
||||
0 -> 0th |
||||
1 -> 1st |
||||
2 -> 2nd |
||||
3 -> 3rd |
||||
4 -> 4th |
||||
[...] |
||||
|
||||
Example: |
||||
|
||||
```go |
||||
fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. |
||||
``` |
||||
|
||||
## Commas |
||||
|
||||
Want to shove commas into numbers? Be my guest. |
||||
|
||||
0 -> 0 |
||||
100 -> 100 |
||||
1000 -> 1,000 |
||||
1000000000 -> 1,000,000,000 |
||||
-100000 -> -100,000 |
||||
|
||||
Example: |
||||
|
||||
```go |
||||
fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. |
||||
``` |
||||
|
||||
## Ftoa |
||||
|
||||
Nicer float64 formatter that removes trailing zeros. |
||||
|
||||
```go |
||||
fmt.Printf("%f", 2.24) // 2.240000 |
||||
fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 |
||||
fmt.Printf("%f", 2.0) // 2.000000 |
||||
fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 |
||||
``` |
||||
|
||||
## SI notation |
||||
|
||||
Format numbers with [SI notation][sinotation]. |
||||
|
||||
Example: |
||||
|
||||
```go |
||||
humanize.SI(0.00000000223, "M") // 2.23 nM |
||||
``` |
||||
|
||||
## English-specific functions |
||||
|
||||
The following functions are in the `humanize/english` subpackage. |
||||
|
||||
### Plurals |
||||
|
||||
Simple English pluralization |
||||
|
||||
```go |
||||
english.PluralWord(1, "object", "") // object |
||||
english.PluralWord(42, "object", "") // objects |
||||
english.PluralWord(2, "bus", "") // buses |
||||
english.PluralWord(99, "locus", "loci") // loci |
||||
|
||||
english.Plural(1, "object", "") // 1 object |
||||
english.Plural(42, "object", "") // 42 objects |
||||
english.Plural(2, "bus", "") // 2 buses |
||||
english.Plural(99, "locus", "loci") // 99 loci |
||||
``` |
||||
|
||||
### Word series |
||||
|
||||
Format comma-separated word lists with conjunctions: |
||||
|
||||
```go |
||||
english.WordSeries([]string{"foo"}, "and") // foo |
||||
english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar |
||||
english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz |
||||
|
||||
english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz |
||||
``` |
||||
|
||||
[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion |
||||
[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix |
||||
@ -0,0 +1,31 @@ |
||||
package humanize |
||||
|
||||
import ( |
||||
"math/big" |
||||
) |
||||
|
||||
// order of magnitude (to a max order)
|
||||
func oomm(n, b *big.Int, maxmag int) (float64, int) { |
||||
mag := 0 |
||||
m := &big.Int{} |
||||
for n.Cmp(b) >= 0 { |
||||
n.DivMod(n, b, m) |
||||
mag++ |
||||
if mag == maxmag && maxmag >= 0 { |
||||
break |
||||
} |
||||
} |
||||
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag |
||||
} |
||||
|
||||
// total order of magnitude
|
||||
// (same as above, but with no upper limit)
|
||||
func oom(n, b *big.Int) (float64, int) { |
||||
mag := 0 |
||||
m := &big.Int{} |
||||
for n.Cmp(b) >= 0 { |
||||
n.DivMod(n, b, m) |
||||
mag++ |
||||
} |
||||
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag |
||||
} |
||||
@ -0,0 +1,173 @@ |
||||
package humanize |
||||
|
||||
import ( |
||||
"fmt" |
||||
"math/big" |
||||
"strings" |
||||
"unicode" |
||||
) |
||||
|
||||
var (
	// bigIECExp is the IEC (binary) step between unit sizes: 1024.
	bigIECExp = big.NewInt(1024)

	// BigByte is one byte in big.Ints
	BigByte = big.NewInt(1)
	// BigKiByte is 1,024 bytes in big.Ints
	BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
	// BigMiByte is 1,024 k bytes in big.Ints
	BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
	// BigGiByte is 1,024 m bytes in big.Ints
	BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
	// BigTiByte is 1,024 g bytes in big.Ints
	BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
	// BigPiByte is 1,024 t bytes in big.Ints
	BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
	// BigEiByte is 1,024 p bytes in big.Ints
	BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
	// BigZiByte is 1,024 e bytes in big.Ints
	BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
	// BigYiByte is 1,024 z bytes in big.Ints
	BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
)

var (
	// bigSIExp is the SI (decimal) step between unit sizes: 1000.
	bigSIExp = big.NewInt(1000)

	// BigSIByte is one SI byte in big.Ints
	BigSIByte = big.NewInt(1)
	// BigKByte is 1,000 SI bytes in big.Ints
	BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
	// BigMByte is 1,000 SI k bytes in big.Ints
	BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
	// BigGByte is 1,000 SI m bytes in big.Ints
	BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
	// BigTByte is 1,000 SI g bytes in big.Ints
	BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
	// BigPByte is 1,000 SI t bytes in big.Ints
	BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
	// BigEByte is 1,000 SI p bytes in big.Ints
	BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
	// BigZByte is 1,000 SI e bytes in big.Ints
	BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
	// BigYByte is 1,000 SI z bytes in big.Ints
	BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
)

// bigBytesSizeTable maps lowercase unit names (with and without a
// trailing "b") to their size in bytes. It backs ParseBigBytes.
var bigBytesSizeTable = map[string]*big.Int{
	"b":   BigByte,
	"kib": BigKiByte,
	"kb":  BigKByte,
	"mib": BigMiByte,
	"mb":  BigMByte,
	"gib": BigGiByte,
	"gb":  BigGByte,
	"tib": BigTiByte,
	"tb":  BigTByte,
	"pib": BigPiByte,
	"pb":  BigPByte,
	"eib": BigEiByte,
	"eb":  BigEByte,
	"zib": BigZiByte,
	"zb":  BigZByte,
	"yib": BigYiByte,
	"yb":  BigYByte,
	// Without suffix
	"":   BigByte,
	"ki": BigKiByte,
	"k":  BigKByte,
	"mi": BigMiByte,
	"m":  BigMByte,
	"gi": BigGiByte,
	"g":  BigGByte,
	"ti": BigTiByte,
	"t":  BigTByte,
	"pi": BigPiByte,
	"p":  BigPByte,
	"ei": BigEiByte,
	"e":  BigEByte,
	"z":  BigZByte,
	"zi": BigZiByte,
	"y":  BigYByte,
	"yi": BigYiByte,
}

// ten is the threshold below which humanateBigBytes prints sizes as
// plain bytes with no rounding.
var ten = big.NewInt(10)
||||
|
||||
func humanateBigBytes(s, base *big.Int, sizes []string) string { |
||||
if s.Cmp(ten) < 0 { |
||||
return fmt.Sprintf("%d B", s) |
||||
} |
||||
c := (&big.Int{}).Set(s) |
||||
val, mag := oomm(c, base, len(sizes)-1) |
||||
suffix := sizes[mag] |
||||
f := "%.0f %s" |
||||
if val < 10 { |
||||
f = "%.1f %s" |
||||
} |
||||
|
||||
return fmt.Sprintf(f, val, suffix) |
||||
|
||||
} |
||||
|
||||
// BigBytes produces a human readable representation of an SI size.
|
||||
//
|
||||
// See also: ParseBigBytes.
|
||||
//
|
||||
// BigBytes(82854982) -> 83 MB
|
||||
func BigBytes(s *big.Int) string { |
||||
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} |
||||
return humanateBigBytes(s, bigSIExp, sizes) |
||||
} |
||||
|
||||
// BigIBytes produces a human readable representation of an IEC size.
|
||||
//
|
||||
// See also: ParseBigBytes.
|
||||
//
|
||||
// BigIBytes(82854982) -> 79 MiB
|
||||
func BigIBytes(s *big.Int) string { |
||||
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} |
||||
return humanateBigBytes(s, bigIECExp, sizes) |
||||
} |
||||
|
||||
// ParseBigBytes parses a string representation of bytes into the number
|
||||
// of bytes it represents.
|
||||
//
|
||||
// See also: BigBytes, BigIBytes.
|
||||
//
|
||||
// ParseBigBytes("42 MB") -> 42000000, nil
|
||||
// ParseBigBytes("42 mib") -> 44040192, nil
|
||||
func ParseBigBytes(s string) (*big.Int, error) { |
||||
lastDigit := 0 |
||||
hasComma := false |
||||
for _, r := range s { |
||||
if !(unicode.IsDigit(r) || r == '.' || r == ',') { |
||||
break |
||||
} |
||||
if r == ',' { |
||||
hasComma = true |
||||
} |
||||
lastDigit++ |
||||
} |
||||
|
||||
num := s[:lastDigit] |
||||
if hasComma { |
||||
num = strings.Replace(num, ",", "", -1) |
||||
} |
||||
|
||||
val := &big.Rat{} |
||||
_, err := fmt.Sscanf(num, "%f", val) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) |
||||
if m, ok := bigBytesSizeTable[extra]; ok { |
||||
mv := (&big.Rat{}).SetInt(m) |
||||
val.Mul(val, mv) |
||||
rv := &big.Int{} |
||||
rv.Div(val.Num(), val.Denom()) |
||||
return rv, nil |
||||
} |
||||
|
||||
return nil, fmt.Errorf("unhandled size name: %v", extra) |
||||
} |
||||
@ -0,0 +1,143 @@ |
||||
package humanize |
||||
|
||||
import ( |
||||
"fmt" |
||||
"math" |
||||
"strconv" |
||||
"strings" |
||||
"unicode" |
||||
) |
||||
|
||||
// IEC Sizes.
// kibis of bits: binary multiples of a byte, each step 1024x.
const (
	Byte = 1 << (iota * 10)
	KiByte
	MiByte
	GiByte
	TiByte
	PiByte
	EiByte
)

// SI Sizes: decimal multiples of a byte, each step 1000x.
const (
	IByte = 1
	KByte = IByte * 1000
	MByte = KByte * 1000
	GByte = MByte * 1000
	TByte = GByte * 1000
	PByte = TByte * 1000
	EByte = PByte * 1000
)

// bytesSizeTable maps lowercase unit names (with and without a trailing
// "b") to their size in bytes. It backs ParseBytes.
var bytesSizeTable = map[string]uint64{
	"b":   Byte,
	"kib": KiByte,
	"kb":  KByte,
	"mib": MiByte,
	"mb":  MByte,
	"gib": GiByte,
	"gb":  GByte,
	"tib": TiByte,
	"tb":  TByte,
	"pib": PiByte,
	"pb":  PByte,
	"eib": EiByte,
	"eb":  EByte,
	// Without suffix
	"":   Byte,
	"ki": KiByte,
	"k":  KByte,
	"mi": MiByte,
	"m":  MByte,
	"gi": GiByte,
	"g":  GByte,
	"ti": TiByte,
	"t":  TByte,
	"pi": PiByte,
	"p":  PByte,
	"ei": EiByte,
	"e":  EByte,
}

// logn returns the logarithm of n in base b.
func logn(n, b float64) float64 {
	return math.Log(n) / math.Log(b)
}

// humanateBytes renders s against the given base (1000 or 1024) and the
// matching suffix table. Values below 10 bytes are printed verbatim;
// everything else is rounded to at most one decimal place.
func humanateBytes(s uint64, base float64, sizes []string) string {
	if s < 10 {
		return fmt.Sprintf("%d B", s)
	}
	exp := math.Floor(logn(float64(s), base))
	// Round half-up to one decimal place.
	rounded := math.Floor(float64(s)/math.Pow(base, exp)*10+0.5) / 10
	format := "%.0f %s"
	if rounded < 10 {
		format = "%.1f %s"
	}
	return fmt.Sprintf(format, rounded, sizes[int(exp)])
}

// Bytes produces a human readable representation of an SI size.
//
// See also: ParseBytes.
//
// Bytes(82854982) -> 83 MB
func Bytes(s uint64) string {
	return humanateBytes(s, 1000, []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"})
}

// IBytes produces a human readable representation of an IEC size.
//
// See also: ParseBytes.
//
// IBytes(82854982) -> 79 MiB
func IBytes(s uint64) string {
	return humanateBytes(s, 1024, []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"})
}

// ParseBytes parses a string representation of bytes into the number
// of bytes it represents.
//
// See Also: Bytes, IBytes.
//
// ParseBytes("42 MB") -> 42000000, nil
// ParseBytes("42 mib") -> 44040192, nil
func ParseBytes(s string) (uint64, error) {
	// Find where the numeric prefix (digits, '.' and ',') ends.
	numEnd := 0
	sawComma := false
	for _, r := range s {
		if !unicode.IsDigit(r) && r != '.' && r != ',' {
			break
		}
		if r == ',' {
			sawComma = true
		}
		numEnd++
	}

	num := s[:numEnd]
	if sawComma {
		num = strings.Replace(num, ",", "", -1)
	}

	f, err := strconv.ParseFloat(num, 64)
	if err != nil {
		return 0, err
	}

	unit := strings.ToLower(strings.TrimSpace(s[numEnd:]))
	multiplier, ok := bytesSizeTable[unit]
	if !ok {
		return 0, fmt.Errorf("unhandled size name: %v", unit)
	}
	f *= float64(multiplier)
	// Reject values that cannot fit a uint64.
	if f >= math.MaxUint64 {
		return 0, fmt.Errorf("too large: %v", s)
	}
	return uint64(f), nil
}
||||
@ -0,0 +1,116 @@ |
||||
package humanize |
||||
|
||||
import ( |
||||
"bytes" |
||||
"math" |
||||
"math/big" |
||||
"strconv" |
||||
"strings" |
||||
) |
||||
|
||||
// Comma produces a string form of the given number in base 10 with
|
||||
// commas after every three orders of magnitude.
|
||||
//
|
||||
// e.g. Comma(834142) -> 834,142
|
||||
func Comma(v int64) string { |
||||
sign := "" |
||||
|
||||
// Min int64 can't be negated to a usable value, so it has to be special cased.
|
||||
if v == math.MinInt64 { |
||||
return "-9,223,372,036,854,775,808" |
||||
} |
||||
|
||||
if v < 0 { |
||||
sign = "-" |
||||
v = 0 - v |
||||
} |
||||
|
||||
parts := []string{"", "", "", "", "", "", ""} |
||||
j := len(parts) - 1 |
||||
|
||||
for v > 999 { |
||||
parts[j] = strconv.FormatInt(v%1000, 10) |
||||
switch len(parts[j]) { |
||||
case 2: |
||||
parts[j] = "0" + parts[j] |
||||
case 1: |
||||
parts[j] = "00" + parts[j] |
||||
} |
||||
v = v / 1000 |
||||
j-- |
||||
} |
||||
parts[j] = strconv.Itoa(int(v)) |
||||
return sign + strings.Join(parts[j:], ",") |
||||
} |
||||
|
||||
// Commaf produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Commaf(834142.32) -> 834,142.32
func Commaf(v float64) string {
	var sb strings.Builder
	if v < 0 {
		sb.WriteByte('-')
		v = -v
	}

	text := strconv.FormatFloat(v, 'f', -1, 64)
	intPart, fracPart := text, ""
	if dot := strings.IndexByte(text, '.'); dot >= 0 {
		intPart, fracPart = text[:dot], text[dot+1:]
	}

	// Leading group may be shorter than three digits.
	lead := len(intPart) % 3
	if lead > 0 {
		sb.WriteString(intPart[:lead])
		if len(intPart) > lead {
			sb.WriteByte(',')
		}
	}
	for i := lead; i < len(intPart); i += 3 {
		sb.WriteString(intPart[i : i+3])
		if i+3 < len(intPart) {
			sb.WriteByte(',')
		}
	}

	if fracPart != "" {
		sb.WriteByte('.')
		sb.WriteString(fracPart)
	}
	return sb.String()
}
||||
|
||||
// CommafWithDigits works like the Commaf but limits the resulting
|
||||
// string to the given number of decimal places.
|
||||
//
|
||||
// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
|
||||
func CommafWithDigits(f float64, decimals int) string { |
||||
return stripTrailingDigits(Commaf(f), decimals) |
||||
} |
||||
|
||||
// BigComma produces a string form of the given big.Int in base 10
|
||||
// with commas after every three orders of magnitude.
|
||||
func BigComma(b *big.Int) string { |
||||
sign := "" |
||||
if b.Sign() < 0 { |
||||
sign = "-" |
||||
b.Abs(b) |
||||
} |
||||
|
||||
athousand := big.NewInt(1000) |
||||
c := (&big.Int{}).Set(b) |
||||
_, m := oom(c, athousand) |
||||
parts := make([]string, m+1) |
||||
j := len(parts) - 1 |
||||
|
||||
mod := &big.Int{} |
||||
for b.Cmp(athousand) >= 0 { |
||||
b.DivMod(b, athousand, mod) |
||||
parts[j] = strconv.FormatInt(mod.Int64(), 10) |
||||
switch len(parts[j]) { |
||||
case 2: |
||||
parts[j] = "0" + parts[j] |
||||
case 1: |
||||
parts[j] = "00" + parts[j] |
||||
} |
||||
j-- |
||||
} |
||||
parts[j] = strconv.Itoa(int(b.Int64())) |
||||
return sign + strings.Join(parts[j:], ",") |
||||
} |
||||
@ -0,0 +1,40 @@ |
||||
// +build go1.6
|
||||
|
||||
package humanize |
||||
|
||||
import ( |
||||
"bytes" |
||||
"math/big" |
||||
"strings" |
||||
) |
||||
|
||||
// BigCommaf produces a string form of the given big.Float in base 10
|
||||
// with commas after every three orders of magnitude.
|
||||
func BigCommaf(v *big.Float) string { |
||||
buf := &bytes.Buffer{} |
||||
if v.Sign() < 0 { |
||||
buf.Write([]byte{'-'}) |
||||
v.Abs(v) |
||||
} |
||||
|
||||
comma := []byte{','} |
||||
|
||||
parts := strings.Split(v.Text('f', -1), ".") |
||||
pos := 0 |
||||
if len(parts[0])%3 != 0 { |
||||
pos += len(parts[0]) % 3 |
||||
buf.WriteString(parts[0][:pos]) |
||||
buf.Write(comma) |
||||
} |
||||
for ; pos < len(parts[0]); pos += 3 { |
||||
buf.WriteString(parts[0][pos : pos+3]) |
||||
buf.Write(comma) |
||||
} |
||||
buf.Truncate(buf.Len() - 1) |
||||
|
||||
if len(parts) > 1 { |
||||
buf.Write([]byte{'.'}) |
||||
buf.WriteString(parts[1]) |
||||
} |
||||
return buf.String() |
||||
} |
||||
@ -0,0 +1,46 @@ |
||||
package humanize |
||||
|
||||
import ( |
||||
"strconv" |
||||
"strings" |
||||
) |
||||
|
||||
// stripTrailingZeros removes insignificant trailing zeros after the
// decimal point, and the point itself if nothing remains behind it
// ("2.2400" -> "2.24", "2.000000" -> "2").
//
// Fix: a string without a decimal point is returned unchanged — the old
// code walked past significant zeros, turning "20" into "2" (visible via
// FtoaWithDigits(20, 0)).
func stripTrailingZeros(s string) string {
	if !strings.ContainsRune(s, '.') {
		return s
	}
	offset := len(s) - 1
	for offset > 0 {
		if s[offset] == '.' {
			offset--
			break
		}
		if s[offset] != '0' {
			break
		}
		offset--
	}
	return s[:offset+1]
}

// stripTrailingDigits truncates the fractional part of s to at most
// digits decimal places; digits <= 0 drops the fraction (and the point)
// entirely. Strings without a decimal point pass through unchanged.
func stripTrailingDigits(s string, digits int) string {
	if i := strings.Index(s, "."); i >= 0 {
		if digits <= 0 {
			return s[:i]
		}
		i++
		if i+digits >= len(s) {
			return s
		}
		return s[:i+digits]
	}
	return s
}

// Ftoa converts a float to a string with no trailing zeros.
func Ftoa(num float64) string {
	return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
}

// FtoaWithDigits converts a float to a string but limits the resulting string
// to the given number of decimal places, and no trailing zeros.
func FtoaWithDigits(num float64, digits int) string {
	return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
}
||||
@ -0,0 +1,8 @@ |
||||
/* |
||||
Package humanize converts boring ugly numbers to human-friendly strings and back. |
||||
|
||||
Durations can be turned into strings such as "3 days ago", numbers |
||||
representing sizes like 82854982 into useful strings like, "83 MB" or |
||||
"79 MiB" (whichever you prefer). |
||||
*/ |
||||
package humanize |
||||
@ -0,0 +1,25 @@ |
||||
package humanize |
||||
|
||||
import "strconv" |
||||
|
||||
// Ordinal gives you the input number in a rank/ordinal format.
//
// Ordinal(3) -> 3rd
func Ordinal(x int) string {
	s := strconv.Itoa(x)
	// The "teens" (11th, 12th, 13th) are irregular and always take "th".
	lastTwo := x % 100
	switch last := x % 10; {
	case last == 1 && lastTwo != 11:
		return s + "st"
	case last == 2 && lastTwo != 12:
		return s + "nd"
	case last == 3 && lastTwo != 13:
		return s + "rd"
	default:
		return s + "th"
	}
}
||||
@ -0,0 +1,123 @@ |
||||
package humanize |
||||
|
||||
import ( |
||||
"errors" |
||||
"math" |
||||
"regexp" |
||||
"strconv" |
||||
) |
||||
|
||||
// siPrefixTable maps a power-of-ten exponent (always a multiple of 3)
// to its SI prefix symbol.
var siPrefixTable = map[float64]string{
	-24: "y", // yocto
	-21: "z", // zepto
	-18: "a", // atto
	-15: "f", // femto
	-12: "p", // pico
	-9:  "n", // nano
	-6:  "µ", // micro
	-3:  "m", // milli
	0:   "",
	3:   "k", // kilo
	6:   "M", // mega
	9:   "G", // giga
	12:  "T", // tera
	15:  "P", // peta
	18:  "E", // exa
	21:  "Z", // zetta
	24:  "Y", // yotta
}

// revSIPrefixTable maps each prefix symbol back to its multiplier
// (e.g. "k" -> 1e3); used by ParseSI.
var revSIPrefixTable = revfmap(siPrefixTable)

// revfmap reverses the map and precomputes the power multiplier
func revfmap(in map[float64]string) map[string]float64 {
	rv := map[string]float64{}
	for k, v := range in {
		rv[v] = math.Pow(10, k)
	}
	return rv
}

// riParseRegex matches "<number><optional prefix><unit>"; it is built
// once in init from the prefix table.
var riParseRegex *regexp.Regexp

func init() {
	// Map iteration order is nondeterministic, but the prefixes end up
	// inside a regexp character class where order does not matter.
	ri := `^([\-0-9.]+)\s?([`
	for _, v := range siPrefixTable {
		ri += v
	}
	ri += `]?)(.*)`

	riParseRegex = regexp.MustCompile(ri)
}
||||
|
||||
// ComputeSI finds the most appropriate SI prefix for the given number
// and returns the prefix along with the value adjusted to be within
// that prefix.
//
// See also: SI, ParseSI.
//
// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
func ComputeSI(input float64) (float64, string) {
	if input == 0 {
		return 0, ""
	}
	mag := math.Abs(input)
	exponent := math.Floor(logn(mag, 10))
	// Snap the exponent down to a multiple of 3, the SI prefix spacing.
	exponent = math.Floor(exponent/3) * 3

	value := mag / math.Pow(10, exponent)

	// Handle special case where value is exactly 1000.0
	// Should return 1 M instead of 1000 k
	if value == 1000.0 {
		exponent += 3
		value = mag / math.Pow(10, exponent)
	}

	// mag dropped the sign above; restore it onto the scaled value.
	value = math.Copysign(value, input)

	// Exponents outside the table (beyond yotta/yocto) fall back to the
	// empty prefix via the map's zero value.
	prefix := siPrefixTable[exponent]
	return value, prefix
}
||||
|
||||
// SI returns a string with default formatting.
|
||||
//
|
||||
// SI uses Ftoa to format float value, removing trailing zeros.
|
||||
//
|
||||
// See also: ComputeSI, ParseSI.
|
||||
//
|
||||
// e.g. SI(1000000, "B") -> 1 MB
|
||||
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
|
||||
func SI(input float64, unit string) string { |
||||
value, prefix := ComputeSI(input) |
||||
return Ftoa(value) + " " + prefix + unit |
||||
} |
||||
|
||||
// SIWithDigits works like SI but limits the resulting string to the
|
||||
// given number of decimal places.
|
||||
//
|
||||
// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
|
||||
// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
|
||||
func SIWithDigits(input float64, decimals int, unit string) string { |
||||
value, prefix := ComputeSI(input) |
||||
return FtoaWithDigits(value, decimals) + " " + prefix + unit |
||||
} |
||||
|
||||
var errInvalid = errors.New("invalid input") |
||||
|
||||
// ParseSI parses an SI string back into the number and unit.
|
||||
//
|
||||
// See also: SI, ComputeSI.
|
||||
//
|
||||
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
|
||||
func ParseSI(input string) (float64, string, error) { |
||||
found := riParseRegex.FindStringSubmatch(input) |
||||
if len(found) != 4 { |
||||
return 0, "", errInvalid |
||||
} |
||||
mag := revSIPrefixTable[found[2]] |
||||
unit := found[3] |
||||
|
||||
base, err := strconv.ParseFloat(found[1], 64) |
||||
return base * mag, unit, err |
||||
} |
||||
@ -0,0 +1,117 @@ |
||||
package humanize |
||||
|
||||
import ( |
||||
"fmt" |
||||
"math" |
||||
"sort" |
||||
"time" |
||||
) |
||||
|
||||
// Seconds-based time units used as humanization thresholds. Note that
// Month and Year are approximations (a 30-day month, a 360-day year),
// not calendar-accurate durations.
const (
	Day      = 24 * time.Hour
	Week     = 7 * Day
	Month    = 30 * Day
	Year     = 12 * Month
	LongTime = 37 * Year
)

// Time formats a time into a relative string.
//
// Time(someT) -> "3 weeks ago"
func Time(then time.Time) string {
	return RelTime(then, time.Now(), "ago", "from now")
}

// A RelTimeMagnitude struct contains a relative time point at which
// the relative format of time will switch to a new format string. A
// slice of these in ascending order by their "D" field is passed to
// CustomRelTime to format durations.
//
// The Format field is a string that may contain a "%s" which will be
// replaced with the appropriate signed label (e.g. "ago" or "from
// now") and a "%d" that will be replaced by the quantity.
//
// The DivBy field is the amount of time the time difference must be
// divided by in order to display correctly.
//
// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
// DivBy should be time.Minute so whatever the duration is will be
// expressed in minutes.
type RelTimeMagnitude struct {
	D      time.Duration
	Format string
	DivBy  time.Duration
}

// defaultMagnitudes is the threshold table used by RelTime; entries are
// ordered by ascending D, as CustomRelTime's binary search requires.
// Formats without a "%d" verb use DivBy == 1 as a placeholder.
var defaultMagnitudes = []RelTimeMagnitude{
	{time.Second, "now", time.Second},
	{2 * time.Second, "1 second %s", 1},
	{time.Minute, "%d seconds %s", time.Second},
	{2 * time.Minute, "1 minute %s", 1},
	{time.Hour, "%d minutes %s", time.Minute},
	{2 * time.Hour, "1 hour %s", 1},
	{Day, "%d hours %s", time.Hour},
	{2 * Day, "1 day %s", 1},
	{Week, "%d days %s", Day},
	{2 * Week, "1 week %s", 1},
	{Month, "%d weeks %s", Week},
	{2 * Month, "1 month %s", 1},
	{Year, "%d months %s", Month},
	{18 * Month, "1 year %s", 1},
	{2 * Year, "2 years %s", 1},
	{LongTime, "%d years %s", Year},
	{math.MaxInt64, "a long while %s", 1},
}
||||
|
||||
// RelTime formats a time into a relative string.
//
// It takes two times and two labels. In addition to the generic time
// delta string (e.g. 5 minutes), a label is applied: the one that
// corresponds to whichever of the two times is smaller.
//
// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
func RelTime(a, b time.Time, albl, blbl string) string {
	return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
}

// CustomRelTime formats a time into a relative string.
//
// It takes two times, two labels and a table of relative time formats.
// In addition to the generic time delta string (e.g. 5 minutes), a
// label is applied: the one that corresponds to whichever of the two
// times is smaller.
func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
	lbl := albl
	diff := b.Sub(a)

	// If a is after b, the roles reverse: use the other label and
	// measure the difference the other way around.
	if a.After(b) {
		lbl = blbl
		diff = a.Sub(b)
	}

	// Find the first magnitude whose threshold exceeds the difference;
	// this relies on magnitudes being sorted ascending by D.
	n := sort.Search(len(magnitudes), func(i int) bool {
		return magnitudes[i].D > diff
	})

	// Differences beyond the last threshold clamp to the final entry.
	if n >= len(magnitudes) {
		n = len(magnitudes) - 1
	}
	mag := magnitudes[n]
	// Scan the format string for %s / %d verbs so the argument list
	// lines up with whatever the entry's Format expects: the label for
	// %s, the scaled quantity for %d.
	args := []interface{}{}
	escaped := false
	for _, ch := range mag.Format {
		if escaped {
			switch ch {
			case 's':
				args = append(args, lbl)
			case 'd':
				args = append(args, diff/mag.DivBy)
			}
			escaped = false
		} else {
			escaped = ch == '%'
		}
	}
	return fmt.Sprintf(mag.Format, args...)
}
||||
@ -1,30 +0,0 @@ |
||||
BRANCH=`git rev-parse --abbrev-ref HEAD`
|
||||
COMMIT=`git rev-parse --short HEAD`
|
||||
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
|
||||
|
||||
default: build |
||||
|
||||
race: |
||||
@go test -v -race -test.run="TestSimulate_(100op|1000op)"
|
||||
|
||||
fmt: |
||||
!(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
|
||||
|
||||
# go get honnef.co/go/tools/simple
|
||||
gosimple: |
||||
gosimple ./...
|
||||
|
||||
# go get honnef.co/go/tools/unused
|
||||
unused: |
||||
unused ./...
|
||||
|
||||
# go get github.com/kisielk/errcheck
|
||||
errcheck: |
||||
@errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
|
||||
|
||||
test: |
||||
go test -timeout 20m -v -coverprofile cover.out -covermode atomic
|
||||
# Note: gets "program not an importable package" in out of path builds
|
||||
go test -v ./cmd/bbolt
|
||||
|
||||
.PHONY: race fmt errcheck test gosimple unused |
||||
@ -0,0 +1,29 @@ |
||||
Copyright (c) 2015, Vincent Batoufflet and Marc Falzon |
||||
All rights reserved. |
||||
|
||||
Redistribution and use in source and binary forms, with or without |
||||
modification, are permitted provided that the following conditions |
||||
are met: |
||||
|
||||
* Redistributions of source code must retain the above copyright |
||||
notice, this list of conditions and the following disclaimer. |
||||
|
||||
* Redistributions in binary form must reproduce the above copyright |
||||
notice, this list of conditions and the following disclaimer in the |
||||
documentation and/or other materials provided with the distribution. |
||||
|
||||
* Neither the name of the authors nor the names of its contributors |
||||
may be used to endorse or promote products derived from this software |
||||
without specific prior written permission. |
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE |
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
||||
POSSIBILITY OF SUCH DAMAGE. |
||||
@ -0,0 +1,104 @@ |
||||
# natsort: natural strings sorting in Go |
||||
|
||||
This is an implementation of the "Alphanum Algorithm" by [Dave Koelle][0] in Go. |
||||
|
||||
[](https://godoc.org/facette.io/natsort) |
||||
|
||||
## Usage |
||||
|
||||
```go |
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
"strings" |
||||
|
||||
"facette.io/natsort" |
||||
) |
||||
|
||||
func main() { |
||||
list := []string{ |
||||
"1000X Radonius Maximus", |
||||
"10X Radonius", |
||||
"200X Radonius", |
||||
"20X Radonius", |
||||
"20X Radonius Prime", |
||||
"30X Radonius", |
||||
"40X Radonius", |
||||
"Allegia 50 Clasteron", |
||||
"Allegia 500 Clasteron", |
||||
"Allegia 50B Clasteron", |
||||
"Allegia 51 Clasteron", |
||||
"Allegia 6R Clasteron", |
||||
"Alpha 100", |
||||
"Alpha 2", |
||||
"Alpha 200", |
||||
"Alpha 2A", |
||||
"Alpha 2A-8000", |
||||
"Alpha 2A-900", |
||||
"Callisto Morphamax", |
||||
"Callisto Morphamax 500", |
||||
"Callisto Morphamax 5000", |
||||
"Callisto Morphamax 600", |
||||
"Callisto Morphamax 6000 SE", |
||||
"Callisto Morphamax 6000 SE2", |
||||
"Callisto Morphamax 700", |
||||
"Callisto Morphamax 7000", |
||||
"Xiph Xlater 10000", |
||||
"Xiph Xlater 2000", |
||||
"Xiph Xlater 300", |
||||
"Xiph Xlater 40", |
||||
"Xiph Xlater 5", |
||||
"Xiph Xlater 50", |
||||
"Xiph Xlater 500", |
||||
"Xiph Xlater 5000", |
||||
"Xiph Xlater 58", |
||||
} |
||||
|
||||
natsort.Sort(list) |
||||
|
||||
fmt.Println(strings.Join(list, "\n")) |
||||
} |
||||
``` |
||||
|
||||
Output: |
||||
|
||||
``` |
||||
10X Radonius |
||||
20X Radonius |
||||
20X Radonius Prime |
||||
30X Radonius |
||||
40X Radonius |
||||
200X Radonius |
||||
1000X Radonius Maximus |
||||
Allegia 6R Clasteron |
||||
Allegia 50 Clasteron |
||||
Allegia 50B Clasteron |
||||
Allegia 51 Clasteron |
||||
Allegia 500 Clasteron |
||||
Alpha 2 |
||||
Alpha 2A |
||||
Alpha 2A-900 |
||||
Alpha 2A-8000 |
||||
Alpha 100 |
||||
Alpha 200 |
||||
Callisto Morphamax |
||||
Callisto Morphamax 500 |
||||
Callisto Morphamax 600 |
||||
Callisto Morphamax 700 |
||||
Callisto Morphamax 5000 |
||||
Callisto Morphamax 6000 SE |
||||
Callisto Morphamax 6000 SE2 |
||||
Callisto Morphamax 7000 |
||||
Xiph Xlater 5 |
||||
Xiph Xlater 40 |
||||
Xiph Xlater 50 |
||||
Xiph Xlater 58 |
||||
Xiph Xlater 300 |
||||
Xiph Xlater 500 |
||||
Xiph Xlater 2000 |
||||
Xiph Xlater 5000 |
||||
Xiph Xlater 10000 |
||||
``` |
||||
|
||||
[0]: http://davekoelle.com/alphanum.html |
||||
@ -0,0 +1,85 @@ |
||||
// Package natsort implements natural strings sorting
|
||||
package natsort |
||||
|
||||
import ( |
||||
"regexp" |
||||
"sort" |
||||
"strconv" |
||||
) |
||||
|
||||
// stringSlice adapts []string to sort.Interface using natural ordering.
type stringSlice []string

func (s stringSlice) Len() int {
	return len(s)
}

func (s stringSlice) Less(a, b int) bool {
	return Compare(s[a], s[b])
}

func (s stringSlice) Swap(a, b int) {
	s[a], s[b] = s[b], s[a]
}

// chunkifyRegexp splits a string into maximal runs of digits / non-digits.
var chunkifyRegexp = regexp.MustCompile(`(\d+|\D+)`)

// chunkify returns s broken into alternating digit and non-digit chunks.
func chunkify(s string) []string {
	return chunkifyRegexp.FindAllString(s, -1)
}

// Sort sorts a list of strings in a natural order
func Sort(l []string) {
	sort.Sort(stringSlice(l))
}

// Compare returns true if the first string precedes the second one
// according to natural order.
//
// Fix: equal strings (including numerically-equal ones such as "a7" vs
// "a07") no longer report true in both directions; Compare is now
// irreflexive, as sort.Interface's Less contract requires.
func Compare(a, b string) bool {
	chunksA := chunkify(a)
	chunksB := chunkify(b)

	nChunksA := len(chunksA)
	nChunksB := len(chunksB)

	for i := range chunksA {
		if i >= nChunksB {
			// b is a proper prefix of a, so a does not precede b.
			return false
		}

		aInt, aErr := strconv.Atoi(chunksA[i])
		bInt, bErr := strconv.Atoi(chunksB[i])

		// If both chunks are numeric, compare them as integers.
		if aErr == nil && bErr == nil {
			if aInt != bInt {
				return aInt < bInt
			}
		} else if chunksA[i] != chunksB[i] {
			// Non-numeric (or mixed) chunks compare lexicographically.
			return chunksA[i] < chunksB[i]
		}

		// Chunks compared equal; decide on length if either string ends here.
		lastA := i == nChunksA-1
		lastB := i == nChunksB-1
		switch {
		case lastA && lastB:
			// Both strings fully consumed and equivalent: a does not precede b.
			return false
		case lastA:
			// a is a proper prefix of b, so a precedes b.
			return true
		case lastB:
			// b is a proper prefix of a, so a does not precede b.
			return false
		}
	}

	return false
}
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
117
vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go
generated
vendored
117
vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go
generated
vendored
@ -0,0 +1,117 @@ |
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/* |
||||
Package remap handles tracking the locations of Go tokens in a source text |
||||
across a rewrite by the Go formatter. |
||||
*/ |
||||
package remap |
||||
|
||||
import ( |
||||
"fmt" |
||||
"go/scanner" |
||||
"go/token" |
||||
) |
||||
|
||||
// A Location represents a span of byte offsets in the source text.
// The span is half-open: it covers bytes [Pos, End).
type Location struct {
	Pos, End int // End is exclusive
}
||||
|
||||
// A Map represents a mapping between token locations in an input source text
// and locations in the corresponding output text.
type Map map[Location]Location
||||
|
||||
// Find reports whether the specified span is recorded by m, and if so returns
|
||||
// the new location it was mapped to. If the input span was not found, the
|
||||
// returned location is the same as the input.
|
||||
func (m Map) Find(pos, end int) (Location, bool) { |
||||
key := Location{ |
||||
Pos: pos, |
||||
End: end, |
||||
} |
||||
if loc, ok := m[key]; ok { |
||||
return loc, true |
||||
} |
||||
return key, false |
||||
} |
||||
|
||||
// add records that the token spanning [opos, oend) in the original text was
// mapped to the span [npos, nend) in the rewritten output.
func (m Map) add(opos, oend, npos, nend int) {
	m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
}
||||
|
||||
// Compute constructs a location mapping from input to output. An error is
|
||||
// reported if any of the tokens of output cannot be mapped.
|
||||
func Compute(input, output []byte) (Map, error) { |
||||
itok := tokenize(input) |
||||
otok := tokenize(output) |
||||
if len(itok) != len(otok) { |
||||
return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok)) |
||||
} |
||||
m := make(Map) |
||||
for i, ti := range itok { |
||||
to := otok[i] |
||||
if ti.Token != to.Token { |
||||
return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to) |
||||
} |
||||
m.add(ti.pos, ti.end, to.pos, to.end) |
||||
} |
||||
return m, nil |
||||
} |
||||
|
||||
// tokinfo records the span and type of a source token.
// The embedded token.Token supplies its String method for diagnostics.
type tokinfo struct {
	// pos and end are 0-based byte offsets into the scanned source;
	// end is computed as pos + len(literal) by tokenize.
	pos, end int
	token.Token
}
||||
|
||||
func tokenize(src []byte) []tokinfo { |
||||
fs := token.NewFileSet() |
||||
var s scanner.Scanner |
||||
s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) |
||||
var info []tokinfo |
||||
for { |
||||
pos, next, lit := s.Scan() |
||||
switch next { |
||||
case token.SEMICOLON: |
||||
continue |
||||
} |
||||
info = append(info, tokinfo{ |
||||
pos: int(pos - 1), |
||||
end: int(pos + token.Pos(len(lit)) - 1), |
||||
Token: next, |
||||
}) |
||||
if next == token.EOF { |
||||
break |
||||
} |
||||
} |
||||
return info |
||||
} |
||||
@ -0,0 +1,369 @@ |
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/compiler/plugin.proto
|
||||
|
||||
/* |
||||
Package plugin_go is a generated protocol buffer package. |
||||
|
||||
It is generated from these files: |
||||
google/protobuf/compiler/plugin.proto |
||||
|
||||
It has these top-level messages: |
||||
Version |
||||
CodeGeneratorRequest |
||||
CodeGeneratorResponse |
||||
*/ |
||||
package plugin_go |
||||
|
||||
import proto "github.com/golang/protobuf/proto" |
||||
import fmt "fmt" |
||||
import math "math" |
||||
import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" |
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal |
||||
var _ = fmt.Errorf |
||||
var _ = math.Inf |
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// The version number of protocol compiler.
//
// NOTE(review): this type and its methods are protoc-gen-go output
// ("Code generated ... DO NOT EDIT"); hand edits will be lost on
// regeneration, so only comments are added here.
type Version struct {
	Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
	Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
	Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
	// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
	// be empty for mainline stable releases.
	Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping (unknown-field
	// preservation and size caching); callers must not touch them.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, ProtoMessage and Descriptor implement the proto.Message
// contract; Descriptor returns the gzipped file descriptor plus this
// message's index path within it.
func (m *Version) Reset()                    { *m = Version{} }
func (m *Version) String() string            { return proto.CompactTextString(m) }
func (*Version) ProtoMessage()               {}
func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }

// Wire-format plumbing: all of the methods below delegate to the proto
// runtime through xxx_messageInfo_Version.
func (m *Version) Unmarshal(b []byte) error {
	return xxx_messageInfo_Version.Unmarshal(m, b)
}
func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Version.Marshal(b, m, deterministic)
}
func (dst *Version) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Version.Merge(dst, src)
}
func (m *Version) XXX_Size() int {
	return xxx_messageInfo_Version.Size(m)
}
func (m *Version) XXX_DiscardUnknown() {
	xxx_messageInfo_Version.DiscardUnknown(m)
}

var xxx_messageInfo_Version proto.InternalMessageInfo

// GetMajor returns Major if set, and 0 otherwise (nil-safe on m).
func (m *Version) GetMajor() int32 {
	if m != nil && m.Major != nil {
		return *m.Major
	}
	return 0
}

// GetMinor returns Minor if set, and 0 otherwise (nil-safe on m).
func (m *Version) GetMinor() int32 {
	if m != nil && m.Minor != nil {
		return *m.Minor
	}
	return 0
}

// GetPatch returns Patch if set, and 0 otherwise (nil-safe on m).
func (m *Version) GetPatch() int32 {
	if m != nil && m.Patch != nil {
		return *m.Patch
	}
	return 0
}

// GetSuffix returns Suffix if set, and "" otherwise (nil-safe on m).
func (m *Version) GetSuffix() string {
	if m != nil && m.Suffix != nil {
		return *m.Suffix
	}
	return ""
}
||||
|
||||
// An encoded CodeGeneratorRequest is written to the plugin's stdin.
//
// NOTE(review): generated by protoc-gen-go ("DO NOT EDIT"); only comments
// are added here.
type CodeGeneratorRequest struct {
	// The .proto files that were explicitly listed on the command-line. The
	// code generator should generate code only for these files. Each file's
	// descriptor will be included in proto_file, below.
	FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
	// The generator parameter passed on the command-line.
	Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
	// FileDescriptorProtos for all files in files_to_generate and everything
	// they import. The files will appear in topological order, so each file
	// appears before any file that imports it.
	//
	// protoc guarantees that all proto_files will be written after
	// the fields above, even though this is not technically guaranteed by the
	// protobuf wire format. This theoretically could allow a plugin to stream
	// in the FileDescriptorProtos and handle them one by one rather than read
	// the entire set into memory at once. However, as of this writing, this
	// is not similarly optimized on protoc's end -- it will store all fields in
	// memory at once before sending them to the plugin.
	//
	// Type names of fields and extensions in the FileDescriptorProto are always
	// fully qualified.
	ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
	// The version number of protocol compiler.
	CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing; wire-format methods delegate
// to the runtime via xxx_messageInfo_CodeGeneratorRequest.
func (m *CodeGeneratorRequest) Reset()                    { *m = CodeGeneratorRequest{} }
func (m *CodeGeneratorRequest) String() string            { return proto.CompactTextString(m) }
func (*CodeGeneratorRequest) ProtoMessage()               {}
func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *CodeGeneratorRequest) Unmarshal(b []byte) error {
	return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b)
}
func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic)
}
func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src)
}
func (m *CodeGeneratorRequest) XXX_Size() int {
	return xxx_messageInfo_CodeGeneratorRequest.Size(m)
}
func (m *CodeGeneratorRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m)
}

var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo

// GetFileToGenerate returns FileToGenerate, or nil if m is nil.
func (m *CodeGeneratorRequest) GetFileToGenerate() []string {
	if m != nil {
		return m.FileToGenerate
	}
	return nil
}

// GetParameter returns Parameter if set, and "" otherwise (nil-safe on m).
func (m *CodeGeneratorRequest) GetParameter() string {
	if m != nil && m.Parameter != nil {
		return *m.Parameter
	}
	return ""
}

// GetProtoFile returns ProtoFile, or nil if m is nil.
func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto {
	if m != nil {
		return m.ProtoFile
	}
	return nil
}

// GetCompilerVersion returns CompilerVersion, or nil if m is nil.
func (m *CodeGeneratorRequest) GetCompilerVersion() *Version {
	if m != nil {
		return m.CompilerVersion
	}
	return nil
}
||||
|
||||
// The plugin writes an encoded CodeGeneratorResponse to stdout.
//
// NOTE(review): generated by protoc-gen-go ("DO NOT EDIT"); only comments
// are added here.
type CodeGeneratorResponse struct {
	// Error message. If non-empty, code generation failed. The plugin process
	// should exit with status code zero even if it reports an error in this way.
	//
	// This should be used to indicate errors in .proto files which prevent the
	// code generator from generating correct code. Errors which indicate a
	// problem in protoc itself -- such as the input CodeGeneratorRequest being
	// unparseable -- should be reported by writing a message to stderr and
	// exiting with a non-zero status code.
	Error *string                       `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
	File  []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing; wire-format methods delegate
// to the runtime via xxx_messageInfo_CodeGeneratorResponse.
func (m *CodeGeneratorResponse) Reset()                    { *m = CodeGeneratorResponse{} }
func (m *CodeGeneratorResponse) String() string            { return proto.CompactTextString(m) }
func (*CodeGeneratorResponse) ProtoMessage()               {}
func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *CodeGeneratorResponse) Unmarshal(b []byte) error {
	return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b)
}
func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic)
}
func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src)
}
func (m *CodeGeneratorResponse) XXX_Size() int {
	return xxx_messageInfo_CodeGeneratorResponse.Size(m)
}
func (m *CodeGeneratorResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m)
}

var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo

// GetError returns Error if set, and "" otherwise (nil-safe on m).
func (m *CodeGeneratorResponse) GetError() string {
	if m != nil && m.Error != nil {
		return *m.Error
	}
	return ""
}

// GetFile returns File, or nil if m is nil.
func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
	if m != nil {
		return m.File
	}
	return nil
}
||||
|
||||
// Represents a single generated file.
//
// NOTE(review): generated by protoc-gen-go ("DO NOT EDIT"); only comments
// are added here.
type CodeGeneratorResponse_File struct {
	// The file name, relative to the output directory. The name must not
	// contain "." or ".." components and must be relative, not be absolute (so,
	// the file cannot lie outside the output directory). "/" must be used as
	// the path separator, not "\".
	//
	// If the name is omitted, the content will be appended to the previous
	// file. This allows the generator to break large files into small chunks,
	// and allows the generated text to be streamed back to protoc so that large
	// files need not reside completely in memory at one time. Note that as of
	// this writing protoc does not optimize for this -- it will read the entire
	// CodeGeneratorResponse before writing files to disk.
	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	// If non-empty, indicates that the named file should already exist, and the
	// content here is to be inserted into that file at a defined insertion
	// point. This feature allows a code generator to extend the output
	// produced by another code generator. The original generator may provide
	// insertion points by placing special annotations in the file that look
	// like:
	// @@protoc_insertion_point(NAME)
	// The annotation can have arbitrary text before and after it on the line,
	// which allows it to be placed in a comment. NAME should be replaced with
	// an identifier naming the point -- this is what other generators will use
	// as the insertion_point. Code inserted at this point will be placed
	// immediately above the line containing the insertion point (thus multiple
	// insertions to the same point will come out in the order they were added).
	// The double-@ is intended to make it unlikely that the generated code
	// could contain things that look like insertion points by accident.
	//
	// For example, the C++ code generator places the following line in the
	// .pb.h files that it generates:
	// // @@protoc_insertion_point(namespace_scope)
	// This line appears within the scope of the file's package namespace, but
	// outside of any particular class. Another plugin can then specify the
	// insertion_point "namespace_scope" to generate additional classes or
	// other declarations that should be placed in this scope.
	//
	// Note that if the line containing the insertion point begins with
	// whitespace, the same whitespace will be added to every line of the
	// inserted text. This is useful for languages like Python, where
	// indentation matters. In these languages, the insertion point comment
	// should be indented the same amount as any inserted code will need to be
	// in order to work correctly in that context.
	//
	// The code generator that generates the initial file and the one which
	// inserts into it must both run as part of a single invocation of protoc.
	// Code generators are executed in the order in which they appear on the
	// command line.
	//
	// If |insertion_point| is present, |name| must also be present.
	InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
	// The file contents.
	Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing; wire-format methods delegate
// to the runtime via xxx_messageInfo_CodeGeneratorResponse_File.
func (m *CodeGeneratorResponse_File) Reset()         { *m = CodeGeneratorResponse_File{} }
func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) }
func (*CodeGeneratorResponse_File) ProtoMessage()    {}
func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error {
	return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b)
}
func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic)
}
func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src)
}
func (m *CodeGeneratorResponse_File) XXX_Size() int {
	return xxx_messageInfo_CodeGeneratorResponse_File.Size(m)
}
func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() {
	xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m)
}

var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo

// GetName returns Name if set, and "" otherwise (nil-safe on m).
func (m *CodeGeneratorResponse_File) GetName() string {
	if m != nil && m.Name != nil {
		return *m.Name
	}
	return ""
}

// GetInsertionPoint returns InsertionPoint if set, and "" otherwise (nil-safe on m).
func (m *CodeGeneratorResponse_File) GetInsertionPoint() string {
	if m != nil && m.InsertionPoint != nil {
		return *m.InsertionPoint
	}
	return ""
}

// GetContent returns Content if set, and "" otherwise (nil-safe on m).
func (m *CodeGeneratorResponse_File) GetContent() string {
	if m != nil && m.Content != nil {
		return *m.Content
	}
	return ""
}
||||
|
||||
// init registers the generated message types with the proto runtime so they
// can be resolved by their fully-qualified proto names.
func init() {
	proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version")
	proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest")
	proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse")
	proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File")
}

// Registers the compressed file descriptor under the .proto file's path.
func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) }
||||
|
||||
var fileDescriptor0 = []byte{ |
||||
// 417 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41, |
||||
0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2, |
||||
0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30, |
||||
0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa, |
||||
0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91, |
||||
0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63, |
||||
0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb, |
||||
0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55, |
||||
0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8, |
||||
0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1, |
||||
0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f, |
||||
0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d, |
||||
0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2, |
||||
0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a, |
||||
0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2, |
||||
0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d, |
||||
0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda, |
||||
0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed, |
||||
0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34, |
||||
0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79, |
||||
0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45, |
||||
0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4, |
||||
0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e, |
||||
0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92, |
||||
0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d, |
||||
0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00, |
||||
0x00, |
||||
} |
||||
@ -0,0 +1,83 @@ |
||||
// Code generated by protoc-gen-go. |
||||
// source: google/protobuf/compiler/plugin.proto |
||||
// DO NOT EDIT! |
||||
|
||||
package google_protobuf_compiler |
||||
|
||||
import proto "github.com/golang/protobuf/proto" |
||||
import "math" |
||||
import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" |
||||
|
||||
// Reference proto and math imports to suppress error if they are not otherwise used. |
||||
var _ = proto.GetString |
||||
var _ = math.Inf |
||||
|
||||
// CodeGeneratorRequest mirrors google.protobuf.compiler.CodeGeneratorRequest.
// NOTE(review): this file appears to be reference ("golden") output of an
// older protoc-gen-go (header says "DO NOT EDIT!", receivers are named
// `this`) — it must stay byte-identical to that generator's emission, so
// only comments are added here; confirm before any other change.
type CodeGeneratorRequest struct {
	FileToGenerate   []string                               `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
	Parameter        *string                                `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
	ProtoFile        []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
	XXX_unrecognized []byte                                 `json:"-"`
}

// Reset, String and ProtoMessage implement the proto.Message contract.
func (this *CodeGeneratorRequest) Reset()         { *this = CodeGeneratorRequest{} }
func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
func (*CodeGeneratorRequest) ProtoMessage()       {}

// GetParameter returns Parameter if set, and "" otherwise (nil-safe).
func (this *CodeGeneratorRequest) GetParameter() string {
	if this != nil && this.Parameter != nil {
		return *this.Parameter
	}
	return ""
}
||||
|
||||
// CodeGeneratorResponse mirrors google.protobuf.compiler.CodeGeneratorResponse.
// NOTE(review): legacy generated output (see note on CodeGeneratorRequest in
// this file) — only comments added.
type CodeGeneratorResponse struct {
	Error            *string                       `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
	File             []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
	XXX_unrecognized []byte                        `json:"-"`
}

// Reset, String and ProtoMessage implement the proto.Message contract.
func (this *CodeGeneratorResponse) Reset()         { *this = CodeGeneratorResponse{} }
func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
func (*CodeGeneratorResponse) ProtoMessage()       {}

// GetError returns Error if set, and "" otherwise (nil-safe).
func (this *CodeGeneratorResponse) GetError() string {
	if this != nil && this.Error != nil {
		return *this.Error
	}
	return ""
}
||||
|
||||
// CodeGeneratorResponse_File mirrors a single generated file in the response.
// NOTE(review): legacy generated output (see note on CodeGeneratorRequest in
// this file) — only comments added.
type CodeGeneratorResponse_File struct {
	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	InsertionPoint   *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
	Content          *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

// Reset, String and ProtoMessage implement the proto.Message contract.
func (this *CodeGeneratorResponse_File) Reset()         { *this = CodeGeneratorResponse_File{} }
func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
func (*CodeGeneratorResponse_File) ProtoMessage()       {}

// GetName returns Name if set, and "" otherwise (nil-safe).
func (this *CodeGeneratorResponse_File) GetName() string {
	if this != nil && this.Name != nil {
		return *this.Name
	}
	return ""
}

// GetInsertionPoint returns InsertionPoint if set, and "" otherwise (nil-safe).
func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
	if this != nil && this.InsertionPoint != nil {
		return *this.InsertionPoint
	}
	return ""
}

// GetContent returns Content if set, and "" otherwise (nil-safe).
func (this *CodeGeneratorResponse_File) GetContent() string {
	if this != nil && this.Content != nil {
		return *this.Content
	}
	return ""
}
||||
|
||||
// init is intentionally empty; presumably retained so this file matches the
// older generator's emission exactly — NOTE(review): confirm before removing.
func init() {
}
||||
@ -0,0 +1,167 @@ |
||||
// Protocol Buffers - Google's data interchange format |
||||
// Copyright 2008 Google Inc. All rights reserved. |
||||
// https://developers.google.com/protocol-buffers/ |
||||
// |
||||
// Redistribution and use in source and binary forms, with or without |
||||
// modification, are permitted provided that the following conditions are |
||||
// met: |
||||
// |
||||
// * Redistributions of source code must retain the above copyright |
||||
// notice, this list of conditions and the following disclaimer. |
||||
// * Redistributions in binary form must reproduce the above |
||||
// copyright notice, this list of conditions and the following disclaimer |
||||
// in the documentation and/or other materials provided with the |
||||
// distribution. |
||||
// * Neither the name of Google Inc. nor the names of its |
||||
// contributors may be used to endorse or promote products derived from |
||||
// this software without specific prior written permission. |
||||
// |
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||||
|
||||
// Author: kenton@google.com (Kenton Varda) |
||||
// |
||||
// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to |
||||
// change. |
||||
// |
||||
// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is |
||||
// just a program that reads a CodeGeneratorRequest from stdin and writes a |
||||
// CodeGeneratorResponse to stdout. |
||||
// |
||||
// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead |
||||
// of dealing with the raw protocol defined here. |
||||
// |
||||
// A plugin executable needs only to be placed somewhere in the path. The |
||||
// plugin should be named "protoc-gen-$NAME", and will then be used when the |
||||
// flag "--${NAME}_out" is passed to protoc. |
||||
|
||||
syntax = "proto2"; |
||||
package google.protobuf.compiler; |
||||
option java_package = "com.google.protobuf.compiler"; |
||||
option java_outer_classname = "PluginProtos"; |
||||
|
||||
option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; |
||||
|
||||
import "google/protobuf/descriptor.proto"; |
||||
|
||||
// The version number of protocol compiler.
message Version {
  // Major.minor.patch components of the compiler's version.
  optional int32 major = 1;
  optional int32 minor = 2;
  optional int32 patch = 3;
  // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
  // be empty for mainline stable releases.
  optional string suffix = 4;
}
||||
|
||||
// An encoded CodeGeneratorRequest is written to the plugin's stdin.
// The plugin replies with a CodeGeneratorResponse (defined below) on stdout.
message CodeGeneratorRequest {
  // The .proto files that were explicitly listed on the command-line. The
  // code generator should generate code only for these files. Each file's
  // descriptor will be included in proto_file, below.
  repeated string file_to_generate = 1;

  // The generator parameter passed on the command-line.
  optional string parameter = 2;

  // FileDescriptorProtos for all files in files_to_generate and everything
  // they import. The files will appear in topological order, so each file
  // appears before any file that imports it.
  //
  // protoc guarantees that all proto_files will be written after
  // the fields above, even though this is not technically guaranteed by the
  // protobuf wire format. This theoretically could allow a plugin to stream
  // in the FileDescriptorProtos and handle them one by one rather than read
  // the entire set into memory at once. However, as of this writing, this
  // is not similarly optimized on protoc's end -- it will store all fields in
  // memory at once before sending them to the plugin.
  //
  // Type names of fields and extensions in the FileDescriptorProto are always
  // fully qualified.
  repeated FileDescriptorProto proto_file = 15;

  // The version number of protocol compiler.
  optional Version compiler_version = 3;

}
||||
|
||||
// The plugin writes an encoded CodeGeneratorResponse to stdout. |
||||
message CodeGeneratorResponse { |
||||
// Error message. If non-empty, code generation failed. The plugin process |
||||
// should exit with status code zero even if it reports an error in this way. |
||||
// |
||||
// This should be used to indicate errors in .proto files which prevent the |
||||
// code generator from generating correct code. Errors which indicate a |
||||
// problem in protoc itself -- such as the input CodeGeneratorRequest being |
||||
// unparseable -- should be reported by writing a message to stderr and |
||||
// exiting with a non-zero status code. |
||||
optional string error = 1; |
||||
|
||||
// Represents a single generated file. |
||||
message File { |
||||
// The file name, relative to the output directory. The name must not |
||||
// contain "." or ".." components and must be relative, not be absolute (so, |
||||
// the file cannot lie outside the output directory). "/" must be used as |
||||
// the path separator, not "\". |
||||
// |
||||
// If the name is omitted, the content will be appended to the previous |
||||
// file. This allows the generator to break large files into small chunks, |
||||
// and allows the generated text to be streamed back to protoc so that large |
||||
// files need not reside completely in memory at one time. Note that as of |
||||
// this writing protoc does not optimize for this -- it will read the entire |
||||
// CodeGeneratorResponse before writing files to disk. |
||||
optional string name = 1; |
||||
|
||||
// If non-empty, indicates that the named file should already exist, and the |
||||
// content here is to be inserted into that file at a defined insertion |
||||
// point. This feature allows a code generator to extend the output |
||||
// produced by another code generator. The original generator may provide |
||||
// insertion points by placing special annotations in the file that look |
||||
// like: |
||||
// @@protoc_insertion_point(NAME) |
||||
// The annotation can have arbitrary text before and after it on the line, |
||||
// which allows it to be placed in a comment. NAME should be replaced with |
||||
// an identifier naming the point -- this is what other generators will use |
||||
// as the insertion_point. Code inserted at this point will be placed |
||||
// immediately above the line containing the insertion point (thus multiple |
||||
// insertions to the same point will come out in the order they were added). |
||||
// The double-@ is intended to make it unlikely that the generated code |
||||
// could contain things that look like insertion points by accident. |
||||
// |
||||
// For example, the C++ code generator places the following line in the |
||||
// .pb.h files that it generates: |
||||
// // @@protoc_insertion_point(namespace_scope) |
||||
// This line appears within the scope of the file's package namespace, but |
||||
// outside of any particular class. Another plugin can then specify the |
||||
// insertion_point "namespace_scope" to generate additional classes or |
||||
// other declarations that should be placed in this scope. |
||||
// |
||||
// Note that if the line containing the insertion point begins with |
||||
// whitespace, the same whitespace will be added to every line of the |
||||
// inserted text. This is useful for languages like Python, where |
||||
// indentation matters. In these languages, the insertion point comment |
||||
// should be indented the same amount as any inserted code will need to be |
||||
// in order to work correctly in that context. |
||||
// |
||||
// The code generator that generates the initial file and the one which |
||||
// inserts into it must both run as part of a single invocation of protoc. |
||||
// Code generators are executed in the order in which they appear on the |
||||
// command line. |
||||
// |
||||
// If |insertion_point| is present, |name| must also be present. |
||||
optional string insertion_point = 2; |
||||
|
||||
// The file contents. |
||||
optional string content = 15; |
||||
} |
||||
repeated File file = 15; |
||||
} |
||||
@ -0,0 +1,450 @@ |
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/struct.proto
|
||||
|
||||
package structpb // import "github.com/golang/protobuf/ptypes/struct"
|
||||
|
||||
import proto "github.com/golang/protobuf/proto" |
||||
import fmt "fmt" |
||||
import math "math" |
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal |
||||
var _ = fmt.Errorf |
||||
var _ = math.Inf |
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// `NullValue` is a singleton enumeration to represent the null value for the
|
||||
// `Value` type union.
|
||||
//
|
||||
// The JSON representation for `NullValue` is JSON `null`.
|
||||
type NullValue int32 |
||||
|
||||
const ( |
||||
// Null value.
|
||||
NullValue_NULL_VALUE NullValue = 0 |
||||
) |
||||
|
||||
var NullValue_name = map[int32]string{ |
||||
0: "NULL_VALUE", |
||||
} |
||||
var NullValue_value = map[string]int32{ |
||||
"NULL_VALUE": 0, |
||||
} |
||||
|
||||
func (x NullValue) String() string { |
||||
return proto.EnumName(NullValue_name, int32(x)) |
||||
} |
||||
func (NullValue) EnumDescriptor() ([]byte, []int) { |
||||
return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} |
||||
} |
||||
func (NullValue) XXX_WellKnownType() string { return "NullValue" } |
||||
|
||||
// `Struct` represents a structured data value, consisting of fields
|
||||
// which map to dynamically typed values. In some languages, `Struct`
|
||||
// might be supported by a native representation. For example, in
|
||||
// scripting languages like JS a struct is represented as an
|
||||
// object. The details of that representation are described together
|
||||
// with the proto support for the language.
|
||||
//
|
||||
// The JSON representation for `Struct` is JSON object.
|
||||
type Struct struct { |
||||
// Unordered map of dynamically typed values.
|
||||
Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"` |
||||
XXX_unrecognized []byte `json:"-"` |
||||
XXX_sizecache int32 `json:"-"` |
||||
} |
||||
|
||||
func (m *Struct) Reset() { *m = Struct{} } |
||||
func (m *Struct) String() string { return proto.CompactTextString(m) } |
||||
func (*Struct) ProtoMessage() {} |
||||
func (*Struct) Descriptor() ([]byte, []int) { |
||||
return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} |
||||
} |
||||
func (*Struct) XXX_WellKnownType() string { return "Struct" } |
||||
func (m *Struct) XXX_Unmarshal(b []byte) error { |
||||
return xxx_messageInfo_Struct.Unmarshal(m, b) |
||||
} |
||||
func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
||||
return xxx_messageInfo_Struct.Marshal(b, m, deterministic) |
||||
} |
||||
func (dst *Struct) XXX_Merge(src proto.Message) { |
||||
xxx_messageInfo_Struct.Merge(dst, src) |
||||
} |
||||
func (m *Struct) XXX_Size() int { |
||||
return xxx_messageInfo_Struct.Size(m) |
||||
} |
||||
func (m *Struct) XXX_DiscardUnknown() { |
||||
xxx_messageInfo_Struct.DiscardUnknown(m) |
||||
} |
||||
|
||||
var xxx_messageInfo_Struct proto.InternalMessageInfo |
||||
|
||||
func (m *Struct) GetFields() map[string]*Value { |
||||
if m != nil { |
||||
return m.Fields |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// `Value` represents a dynamically typed value which can be either
|
||||
// null, a number, a string, a boolean, a recursive struct value, or a
|
||||
// list of values. A producer of value is expected to set one of that
|
||||
// variants, absence of any variant indicates an error.
|
||||
//
|
||||
// The JSON representation for `Value` is JSON value.
|
||||
type Value struct { |
||||
// The kind of value.
|
||||
//
|
||||
// Types that are valid to be assigned to Kind:
|
||||
// *Value_NullValue
|
||||
// *Value_NumberValue
|
||||
// *Value_StringValue
|
||||
// *Value_BoolValue
|
||||
// *Value_StructValue
|
||||
// *Value_ListValue
|
||||
Kind isValue_Kind `protobuf_oneof:"kind"` |
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"` |
||||
XXX_unrecognized []byte `json:"-"` |
||||
XXX_sizecache int32 `json:"-"` |
||||
} |
||||
|
||||
func (m *Value) Reset() { *m = Value{} } |
||||
func (m *Value) String() string { return proto.CompactTextString(m) } |
||||
func (*Value) ProtoMessage() {} |
||||
func (*Value) Descriptor() ([]byte, []int) { |
||||
return fileDescriptor_struct_3a5a94e0c7801b27, []int{1} |
||||
} |
||||
func (*Value) XXX_WellKnownType() string { return "Value" } |
||||
func (m *Value) XXX_Unmarshal(b []byte) error { |
||||
return xxx_messageInfo_Value.Unmarshal(m, b) |
||||
} |
||||
func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
||||
return xxx_messageInfo_Value.Marshal(b, m, deterministic) |
||||
} |
||||
func (dst *Value) XXX_Merge(src proto.Message) { |
||||
xxx_messageInfo_Value.Merge(dst, src) |
||||
} |
||||
func (m *Value) XXX_Size() int { |
||||
return xxx_messageInfo_Value.Size(m) |
||||
} |
||||
func (m *Value) XXX_DiscardUnknown() { |
||||
xxx_messageInfo_Value.DiscardUnknown(m) |
||||
} |
||||
|
||||
var xxx_messageInfo_Value proto.InternalMessageInfo |
||||
|
||||
type isValue_Kind interface { |
||||
isValue_Kind() |
||||
} |
||||
|
||||
type Value_NullValue struct { |
||||
NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` |
||||
} |
||||
|
||||
type Value_NumberValue struct { |
||||
NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` |
||||
} |
||||
|
||||
type Value_StringValue struct { |
||||
StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` |
||||
} |
||||
|
||||
type Value_BoolValue struct { |
||||
BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` |
||||
} |
||||
|
||||
type Value_StructValue struct { |
||||
StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` |
||||
} |
||||
|
||||
type Value_ListValue struct { |
||||
ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` |
||||
} |
||||
|
||||
func (*Value_NullValue) isValue_Kind() {} |
||||
|
||||
func (*Value_NumberValue) isValue_Kind() {} |
||||
|
||||
func (*Value_StringValue) isValue_Kind() {} |
||||
|
||||
func (*Value_BoolValue) isValue_Kind() {} |
||||
|
||||
func (*Value_StructValue) isValue_Kind() {} |
||||
|
||||
func (*Value_ListValue) isValue_Kind() {} |
||||
|
||||
func (m *Value) GetKind() isValue_Kind { |
||||
if m != nil { |
||||
return m.Kind |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *Value) GetNullValue() NullValue { |
||||
if x, ok := m.GetKind().(*Value_NullValue); ok { |
||||
return x.NullValue |
||||
} |
||||
return NullValue_NULL_VALUE |
||||
} |
||||
|
||||
func (m *Value) GetNumberValue() float64 { |
||||
if x, ok := m.GetKind().(*Value_NumberValue); ok { |
||||
return x.NumberValue |
||||
} |
||||
return 0 |
||||
} |
||||
|
||||
func (m *Value) GetStringValue() string { |
||||
if x, ok := m.GetKind().(*Value_StringValue); ok { |
||||
return x.StringValue |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
func (m *Value) GetBoolValue() bool { |
||||
if x, ok := m.GetKind().(*Value_BoolValue); ok { |
||||
return x.BoolValue |
||||
} |
||||
return false |
||||
} |
||||
|
||||
func (m *Value) GetStructValue() *Struct { |
||||
if x, ok := m.GetKind().(*Value_StructValue); ok { |
||||
return x.StructValue |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *Value) GetListValue() *ListValue { |
||||
if x, ok := m.GetKind().(*Value_ListValue); ok { |
||||
return x.ListValue |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||
func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { |
||||
return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ |
||||
(*Value_NullValue)(nil), |
||||
(*Value_NumberValue)(nil), |
||||
(*Value_StringValue)(nil), |
||||
(*Value_BoolValue)(nil), |
||||
(*Value_StructValue)(nil), |
||||
(*Value_ListValue)(nil), |
||||
} |
||||
} |
||||
|
||||
func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { |
||||
m := msg.(*Value) |
||||
// kind
|
||||
switch x := m.Kind.(type) { |
||||
case *Value_NullValue: |
||||
b.EncodeVarint(1<<3 | proto.WireVarint) |
||||
b.EncodeVarint(uint64(x.NullValue)) |
||||
case *Value_NumberValue: |
||||
b.EncodeVarint(2<<3 | proto.WireFixed64) |
||||
b.EncodeFixed64(math.Float64bits(x.NumberValue)) |
||||
case *Value_StringValue: |
||||
b.EncodeVarint(3<<3 | proto.WireBytes) |
||||
b.EncodeStringBytes(x.StringValue) |
||||
case *Value_BoolValue: |
||||
t := uint64(0) |
||||
if x.BoolValue { |
||||
t = 1 |
||||
} |
||||
b.EncodeVarint(4<<3 | proto.WireVarint) |
||||
b.EncodeVarint(t) |
||||
case *Value_StructValue: |
||||
b.EncodeVarint(5<<3 | proto.WireBytes) |
||||
if err := b.EncodeMessage(x.StructValue); err != nil { |
||||
return err |
||||
} |
||||
case *Value_ListValue: |
||||
b.EncodeVarint(6<<3 | proto.WireBytes) |
||||
if err := b.EncodeMessage(x.ListValue); err != nil { |
||||
return err |
||||
} |
||||
case nil: |
||||
default: |
||||
return fmt.Errorf("Value.Kind has unexpected type %T", x) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { |
||||
m := msg.(*Value) |
||||
switch tag { |
||||
case 1: // kind.null_value
|
||||
if wire != proto.WireVarint { |
||||
return true, proto.ErrInternalBadWireType |
||||
} |
||||
x, err := b.DecodeVarint() |
||||
m.Kind = &Value_NullValue{NullValue(x)} |
||||
return true, err |
||||
case 2: // kind.number_value
|
||||
if wire != proto.WireFixed64 { |
||||
return true, proto.ErrInternalBadWireType |
||||
} |
||||
x, err := b.DecodeFixed64() |
||||
m.Kind = &Value_NumberValue{math.Float64frombits(x)} |
||||
return true, err |
||||
case 3: // kind.string_value
|
||||
if wire != proto.WireBytes { |
||||
return true, proto.ErrInternalBadWireType |
||||
} |
||||
x, err := b.DecodeStringBytes() |
||||
m.Kind = &Value_StringValue{x} |
||||
return true, err |
||||
case 4: // kind.bool_value
|
||||
if wire != proto.WireVarint { |
||||
return true, proto.ErrInternalBadWireType |
||||
} |
||||
x, err := b.DecodeVarint() |
||||
m.Kind = &Value_BoolValue{x != 0} |
||||
return true, err |
||||
case 5: // kind.struct_value
|
||||
if wire != proto.WireBytes { |
||||
return true, proto.ErrInternalBadWireType |
||||
} |
||||
msg := new(Struct) |
||||
err := b.DecodeMessage(msg) |
||||
m.Kind = &Value_StructValue{msg} |
||||
return true, err |
||||
case 6: // kind.list_value
|
||||
if wire != proto.WireBytes { |
||||
return true, proto.ErrInternalBadWireType |
||||
} |
||||
msg := new(ListValue) |
||||
err := b.DecodeMessage(msg) |
||||
m.Kind = &Value_ListValue{msg} |
||||
return true, err |
||||
default: |
||||
return false, nil |
||||
} |
||||
} |
||||
|
||||
func _Value_OneofSizer(msg proto.Message) (n int) { |
||||
m := msg.(*Value) |
||||
// kind
|
||||
switch x := m.Kind.(type) { |
||||
case *Value_NullValue: |
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(x.NullValue)) |
||||
case *Value_NumberValue: |
||||
n += 1 // tag and wire
|
||||
n += 8 |
||||
case *Value_StringValue: |
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(len(x.StringValue))) |
||||
n += len(x.StringValue) |
||||
case *Value_BoolValue: |
||||
n += 1 // tag and wire
|
||||
n += 1 |
||||
case *Value_StructValue: |
||||
s := proto.Size(x.StructValue) |
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s)) |
||||
n += s |
||||
case *Value_ListValue: |
||||
s := proto.Size(x.ListValue) |
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s)) |
||||
n += s |
||||
case nil: |
||||
default: |
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) |
||||
} |
||||
return n |
||||
} |
||||
|
||||
// `ListValue` is a wrapper around a repeated field of values.
|
||||
//
|
||||
// The JSON representation for `ListValue` is JSON array.
|
||||
type ListValue struct { |
||||
// Repeated field of dynamically typed values.
|
||||
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` |
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"` |
||||
XXX_unrecognized []byte `json:"-"` |
||||
XXX_sizecache int32 `json:"-"` |
||||
} |
||||
|
||||
func (m *ListValue) Reset() { *m = ListValue{} } |
||||
func (m *ListValue) String() string { return proto.CompactTextString(m) } |
||||
func (*ListValue) ProtoMessage() {} |
||||
func (*ListValue) Descriptor() ([]byte, []int) { |
||||
return fileDescriptor_struct_3a5a94e0c7801b27, []int{2} |
||||
} |
||||
func (*ListValue) XXX_WellKnownType() string { return "ListValue" } |
||||
func (m *ListValue) XXX_Unmarshal(b []byte) error { |
||||
return xxx_messageInfo_ListValue.Unmarshal(m, b) |
||||
} |
||||
func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
||||
return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) |
||||
} |
||||
func (dst *ListValue) XXX_Merge(src proto.Message) { |
||||
xxx_messageInfo_ListValue.Merge(dst, src) |
||||
} |
||||
func (m *ListValue) XXX_Size() int { |
||||
return xxx_messageInfo_ListValue.Size(m) |
||||
} |
||||
func (m *ListValue) XXX_DiscardUnknown() { |
||||
xxx_messageInfo_ListValue.DiscardUnknown(m) |
||||
} |
||||
|
||||
var xxx_messageInfo_ListValue proto.InternalMessageInfo |
||||
|
||||
func (m *ListValue) GetValues() []*Value { |
||||
if m != nil { |
||||
return m.Values |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func init() { |
||||
proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") |
||||
proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") |
||||
proto.RegisterType((*Value)(nil), "google.protobuf.Value") |
||||
proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") |
||||
proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) |
||||
} |
||||
|
||||
func init() { |
||||
proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27) |
||||
} |
||||
|
||||
var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{ |
||||
// 417 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, |
||||
0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, |
||||
0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, |
||||
0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, |
||||
0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, |
||||
0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, |
||||
0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, |
||||
0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, |
||||
0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, |
||||
0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, |
||||
0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, |
||||
0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, |
||||
0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, |
||||
0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, |
||||
0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, |
||||
0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, |
||||
0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, |
||||
0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, |
||||
0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, |
||||
0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, |
||||
0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, |
||||
0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, |
||||
0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, |
||||
0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, |
||||
0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, |
||||
0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, |
||||
0x00, |
||||
} |
||||
@ -0,0 +1,96 @@ |
||||
// Protocol Buffers - Google's data interchange format |
||||
// Copyright 2008 Google Inc. All rights reserved. |
||||
// https://developers.google.com/protocol-buffers/ |
||||
// |
||||
// Redistribution and use in source and binary forms, with or without |
||||
// modification, are permitted provided that the following conditions are |
||||
// met: |
||||
// |
||||
// * Redistributions of source code must retain the above copyright |
||||
// notice, this list of conditions and the following disclaimer. |
||||
// * Redistributions in binary form must reproduce the above |
||||
// copyright notice, this list of conditions and the following disclaimer |
||||
// in the documentation and/or other materials provided with the |
||||
// distribution. |
||||
// * Neither the name of Google Inc. nor the names of its |
||||
// contributors may be used to endorse or promote products derived from |
||||
// this software without specific prior written permission. |
||||
// |
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package google.protobuf; |
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes"; |
||||
option cc_enable_arenas = true; |
||||
option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; |
||||
option java_package = "com.google.protobuf"; |
||||
option java_outer_classname = "StructProto"; |
||||
option java_multiple_files = true; |
||||
option objc_class_prefix = "GPB"; |
||||
|
||||
|
||||
// `Struct` represents a structured data value, consisting of fields |
||||
// which map to dynamically typed values. In some languages, `Struct` |
||||
// might be supported by a native representation. For example, in |
||||
// scripting languages like JS a struct is represented as an |
||||
// object. The details of that representation are described together |
||||
// with the proto support for the language. |
||||
// |
||||
// The JSON representation for `Struct` is JSON object. |
||||
message Struct { |
||||
// Unordered map of dynamically typed values. |
||||
map<string, Value> fields = 1; |
||||
} |
||||
|
||||
// `Value` represents a dynamically typed value which can be either |
||||
// null, a number, a string, a boolean, a recursive struct value, or a |
||||
// list of values. A producer of value is expected to set one of that |
||||
// variants, absence of any variant indicates an error. |
||||
// |
||||
// The JSON representation for `Value` is JSON value. |
||||
message Value { |
||||
// The kind of value. |
||||
oneof kind { |
||||
// Represents a null value. |
||||
NullValue null_value = 1; |
||||
// Represents a double value. |
||||
double number_value = 2; |
||||
// Represents a string value. |
||||
string string_value = 3; |
||||
// Represents a boolean value. |
||||
bool bool_value = 4; |
||||
// Represents a structured value. |
||||
Struct struct_value = 5; |
||||
// Represents a repeated `Value`. |
||||
ListValue list_value = 6; |
||||
} |
||||
} |
||||
|
||||
// `NullValue` is a singleton enumeration to represent the null value for the |
||||
// `Value` type union. |
||||
// |
||||
// The JSON representation for `NullValue` is JSON `null`. |
||||
enum NullValue { |
||||
// Null value. |
||||
NULL_VALUE = 0; |
||||
} |
||||
|
||||
// `ListValue` is a wrapper around a repeated field of values. |
||||
// |
||||
// The JSON representation for `ListValue` is JSON array. |
||||
message ListValue { |
||||
// Repeated field of dynamically typed values. |
||||
repeated Value values = 1; |
||||
} |
||||
@ -0,0 +1,9 @@ |
||||
language: go |
||||
|
||||
go: |
||||
- 1.4.3 |
||||
- 1.5.3 |
||||
- tip |
||||
|
||||
script: |
||||
- go test -v ./... |
||||
@ -0,0 +1,10 @@ |
||||
# How to contribute |
||||
|
||||
We definitely welcome patches and contribution to this project! |
||||
|
||||
### Legal requirements |
||||
|
||||
In order to protect both you and ourselves, you will need to sign the |
||||
[Contributor License Agreement](https://cla.developers.google.com/clas). |
||||
|
||||
You may have already signed it for other Google projects. |
||||
@ -0,0 +1,9 @@ |
||||
Paul Borman <borman@google.com> |
||||
bmatsuo |
||||
shawnps |
||||
theory |
||||
jboverfelt |
||||
dsymonds |
||||
cd1 |
||||
wallclockbuilder |
||||
dansouza |
||||
@ -0,0 +1,27 @@ |
||||
Copyright (c) 2009,2014 Google Inc. All rights reserved. |
||||
|
||||
Redistribution and use in source and binary forms, with or without |
||||
modification, are permitted provided that the following conditions are |
||||
met: |
||||
|
||||
* Redistributions of source code must retain the above copyright |
||||
notice, this list of conditions and the following disclaimer. |
||||
* Redistributions in binary form must reproduce the above |
||||
copyright notice, this list of conditions and the following disclaimer |
||||
in the documentation and/or other materials provided with the |
||||
distribution. |
||||
* Neither the name of Google Inc. nor the names of its |
||||
contributors may be used to endorse or promote products derived from |
||||
this software without specific prior written permission. |
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||||
@ -0,0 +1,19 @@ |
||||
# uuid  |
||||
The uuid package generates and inspects UUIDs based on |
||||
[RFC 4122](http://tools.ietf.org/html/rfc4122) |
||||
and DCE 1.1: Authentication and Security Services. |
||||
|
||||
This package is based on the github.com/pborman/uuid package (previously named |
||||
code.google.com/p/go-uuid). It differs from these earlier packages in that |
||||
a UUID is a 16 byte array rather than a byte slice. One loss due to this |
||||
change is the ability to represent an invalid UUID (vs a NIL UUID). |
||||
|
||||
###### Install |
||||
`go get github.com/google/uuid` |
||||
|
||||
###### Documentation |
||||
[](http://godoc.org/github.com/google/uuid) |
||||
|
||||
Full `go doc` style documentation for the package can be viewed online without |
||||
installing this package by using the GoDoc site here: |
||||
http://godoc.org/github.com/google/uuid |
||||
@ -0,0 +1,80 @@ |
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid |
||||
|
||||
import ( |
||||
"encoding/binary" |
||||
"fmt" |
||||
"os" |
||||
) |
||||
|
||||
// A Domain represents a Version 2 domain
|
||||
type Domain byte |
||||
|
||||
// Domain constants for DCE Security (Version 2) UUIDs.
|
||||
const ( |
||||
Person = Domain(0) |
||||
Group = Domain(1) |
||||
Org = Domain(2) |
||||
) |
||||
|
||||
// NewDCESecurity returns a DCE Security (Version 2) UUID.
|
||||
//
|
||||
// The domain should be one of Person, Group or Org.
|
||||
// On a POSIX system the id should be the users UID for the Person
|
||||
// domain and the users GID for the Group. The meaning of id for
|
||||
// the domain Org or on non-POSIX systems is site defined.
|
||||
//
|
||||
// For a given domain/id pair the same token may be returned for up to
|
||||
// 7 minutes and 10 seconds.
|
||||
func NewDCESecurity(domain Domain, id uint32) (UUID, error) { |
||||
uuid, err := NewUUID() |
||||
if err == nil { |
||||
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
|
||||
uuid[9] = byte(domain) |
||||
binary.BigEndian.PutUint32(uuid[0:], id) |
||||
} |
||||
return uuid, err |
||||
} |
||||
|
||||
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
|
||||
// domain with the id returned by os.Getuid.
|
||||
//
|
||||
// NewDCESecurity(Person, uint32(os.Getuid()))
|
||||
func NewDCEPerson() (UUID, error) { |
||||
return NewDCESecurity(Person, uint32(os.Getuid())) |
||||
} |
||||
|
||||
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
|
||||
// domain with the id returned by os.Getgid.
|
||||
//
|
||||
// NewDCESecurity(Group, uint32(os.Getgid()))
|
||||
func NewDCEGroup() (UUID, error) { |
||||
return NewDCESecurity(Group, uint32(os.Getgid())) |
||||
} |
||||
|
||||
// Domain returns the domain for a Version 2 UUID. Domains are only defined
|
||||
// for Version 2 UUIDs.
|
||||
func (uuid UUID) Domain() Domain { |
||||
return Domain(uuid[9]) |
||||
} |
||||
|
||||
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
|
||||
// UUIDs.
|
||||
func (uuid UUID) ID() uint32 { |
||||
return binary.BigEndian.Uint32(uuid[0:4]) |
||||
} |
||||
|
||||
func (d Domain) String() string { |
||||
switch d { |
||||
case Person: |
||||
return "Person" |
||||
case Group: |
||||
return "Group" |
||||
case Org: |
||||
return "Org" |
||||
} |
||||
return fmt.Sprintf("Domain%d", int(d)) |
||||
} |
||||
@ -0,0 +1,12 @@ |
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package uuid generates and inspects UUIDs.
|
||||
//
|
||||
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
|
||||
// Services.
|
||||
//
|
||||
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
|
||||
// maps or compared directly.
|
||||
package uuid |
||||
@ -0,0 +1 @@ |
||||
module github.com/google/uuid |
||||
@ -0,0 +1,53 @@ |
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid |
||||
|
||||
import ( |
||||
"crypto/md5" |
||||
"crypto/sha1" |
||||
"hash" |
||||
) |
||||
|
||||
// Well known namespace IDs and UUIDs
|
||||
var ( |
||||
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) |
||||
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) |
||||
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) |
||||
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) |
||||
Nil UUID // empty UUID, all zeros
|
||||
) |
||||
|
||||
// NewHash returns a new UUID derived from the hash of space concatenated with
|
||||
// data generated by h. The hash should be at least 16 byte in length. The
|
||||
// first 16 bytes of the hash are used to form the UUID. The version of the
|
||||
// UUID will be the lower 4 bits of version. NewHash is used to implement
|
||||
// NewMD5 and NewSHA1.
|
||||
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { |
||||
h.Reset() |
||||
h.Write(space[:]) |
||||
h.Write(data) |
||||
s := h.Sum(nil) |
||||
var uuid UUID |
||||
copy(uuid[:], s) |
||||
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) |
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
|
||||
return uuid |
||||
} |
||||
|
||||
// NewMD5 returns a new MD5 (Version 3) UUID based on the
|
||||
// supplied name space and data. It is the same as calling:
|
||||
//
|
||||
// NewHash(md5.New(), space, data, 3)
|
||||
func NewMD5(space UUID, data []byte) UUID { |
||||
return NewHash(md5.New(), space, data, 3) |
||||
} |
||||
|
||||
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
|
||||
// supplied name space and data. It is the same as calling:
|
||||
//
|
||||
// NewHash(sha1.New(), space, data, 5)
|
||||
func NewSHA1(space UUID, data []byte) UUID { |
||||
return NewHash(sha1.New(), space, data, 5) |
||||
} |
||||
@ -0,0 +1,37 @@ |
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid |
||||
|
||||
import "fmt" |
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (uuid UUID) MarshalText() ([]byte, error) { |
||||
var js [36]byte |
||||
encodeHex(js[:], uuid) |
||||
return js[:], nil |
||||
} |
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (uuid *UUID) UnmarshalText(data []byte) error { |
||||
id, err := ParseBytes(data) |
||||
if err == nil { |
||||
*uuid = id |
||||
} |
||||
return err |
||||
} |
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler.
|
||||
func (uuid UUID) MarshalBinary() ([]byte, error) { |
||||
return uuid[:], nil |
||||
} |
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
||||
func (uuid *UUID) UnmarshalBinary(data []byte) error { |
||||
if len(data) != 16 { |
||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) |
||||
} |
||||
copy(uuid[:], data) |
||||
return nil |
||||
} |
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue