Merge pull request #2557 from prometheus/influxdb-read
Add InfluxDB read-back support to remote storage bridge (pull/2562/head)
commit
eda4286484
@ -0,0 +1,609 @@ |
||||
// Package client (v2) is the current official Go client for InfluxDB.
|
||||
package client // import "github.com/influxdata/influxdb/client/v2"
|
||||
|
||||
import ( |
||||
"bytes" |
||||
"crypto/tls" |
||||
"encoding/json" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"net/http" |
||||
"net/url" |
||||
"strconv" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/influxdata/influxdb/models" |
||||
) |
||||
|
||||
// HTTPConfig is the config data needed to create an HTTP Client.
// The zero value is not usable on its own: Addr must be set.
type HTTPConfig struct {
	// Addr should be of the form "http://host:port"
	// or "http://[ipv6-host%zone]:port".
	Addr string

	// Username is the influxdb username, optional.
	Username string

	// Password is the influxdb password, optional.
	Password string

	// UserAgent is the http User Agent, defaults to "InfluxDBClient".
	UserAgent string

	// Timeout for influxdb writes, defaults to no timeout.
	Timeout time.Duration

	// InsecureSkipVerify gets passed to the http client, if true, it will
	// skip https certificate verification. Defaults to false.
	InsecureSkipVerify bool

	// TLSConfig allows the user to set their own TLS config for the HTTP
	// Client. If set, this option overrides InsecureSkipVerify.
	TLSConfig *tls.Config
}
||||
|
||||
// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct.
type BatchPointsConfig struct {
	// Precision is the write precision of the points, defaults to "ns".
	Precision string

	// Database is the database to write points to.
	Database string

	// RetentionPolicy is the retention policy of the points.
	RetentionPolicy string

	// WriteConsistency is the number of servers required to confirm write.
	WriteConsistency string
}
||||
|
||||
// Client is a client interface for writing & querying the database.
type Client interface {
	// Ping checks the status of the cluster, and will always return 0 time
	// and no error for UDP clients.
	Ping(timeout time.Duration) (time.Duration, string, error)

	// Write takes a BatchPoints object and writes all Points to InfluxDB.
	Write(bp BatchPoints) error

	// Query makes an InfluxDB Query on the database. This will fail if using
	// the UDP client.
	Query(q Query) (*Response, error)

	// Close releases any resources a Client may be using.
	Close() error
}
||||
|
||||
// NewHTTPClient returns a new Client from the provided config.
|
||||
// Client is safe for concurrent use by multiple goroutines.
|
||||
func NewHTTPClient(conf HTTPConfig) (Client, error) { |
||||
if conf.UserAgent == "" { |
||||
conf.UserAgent = "InfluxDBClient" |
||||
} |
||||
|
||||
u, err := url.Parse(conf.Addr) |
||||
if err != nil { |
||||
return nil, err |
||||
} else if u.Scheme != "http" && u.Scheme != "https" { |
||||
m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+ |
||||
" must start with http:// or https://", u.Scheme) |
||||
return nil, errors.New(m) |
||||
} |
||||
|
||||
tr := &http.Transport{ |
||||
TLSClientConfig: &tls.Config{ |
||||
InsecureSkipVerify: conf.InsecureSkipVerify, |
||||
}, |
||||
} |
||||
if conf.TLSConfig != nil { |
||||
tr.TLSClientConfig = conf.TLSConfig |
||||
} |
||||
return &client{ |
||||
url: *u, |
||||
username: conf.Username, |
||||
password: conf.Password, |
||||
useragent: conf.UserAgent, |
||||
httpClient: &http.Client{ |
||||
Timeout: conf.Timeout, |
||||
Transport: tr, |
||||
}, |
||||
transport: tr, |
||||
}, nil |
||||
} |
||||
|
||||
// Ping will check to see if the server is up with an optional timeout on waiting for leader.
|
||||
// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
|
||||
func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) { |
||||
now := time.Now() |
||||
u := c.url |
||||
u.Path = "ping" |
||||
|
||||
req, err := http.NewRequest("GET", u.String(), nil) |
||||
if err != nil { |
||||
return 0, "", err |
||||
} |
||||
|
||||
req.Header.Set("User-Agent", c.useragent) |
||||
|
||||
if c.username != "" { |
||||
req.SetBasicAuth(c.username, c.password) |
||||
} |
||||
|
||||
if timeout > 0 { |
||||
params := req.URL.Query() |
||||
params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds())) |
||||
req.URL.RawQuery = params.Encode() |
||||
} |
||||
|
||||
resp, err := c.httpClient.Do(req) |
||||
if err != nil { |
||||
return 0, "", err |
||||
} |
||||
defer resp.Body.Close() |
||||
|
||||
body, err := ioutil.ReadAll(resp.Body) |
||||
if err != nil { |
||||
return 0, "", err |
||||
} |
||||
|
||||
if resp.StatusCode != http.StatusNoContent { |
||||
var err = fmt.Errorf(string(body)) |
||||
return 0, "", err |
||||
} |
||||
|
||||
version := resp.Header.Get("X-Influxdb-Version") |
||||
return time.Since(now), version, nil |
||||
} |
||||
|
||||
// Close releases the client's resources by closing any idle connections
// held by the underlying HTTP transport. It never returns a non-nil error.
func (c *client) Close() error {
	c.transport.CloseIdleConnections()
	return nil
}
||||
|
||||
// client is safe for concurrent use as the fields are all read-only
// once the client is instantiated.
type client struct {
	// N.B - if url.UserInfo is accessed in future modifications to the
	// methods on client, you will need to synchronise access to url.
	url        url.URL
	username   string
	password   string
	useragent  string
	httpClient *http.Client
	// transport is retained so Close can drop idle connections.
	transport *http.Transport
}
||||
|
||||
// BatchPoints is an interface into a batched grouping of points to write into
// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate
// batch for each goroutine.
type BatchPoints interface {
	// AddPoint adds the given point to the Batch of points.
	AddPoint(p *Point)
	// AddPoints adds the given points to the Batch of points.
	AddPoints(ps []*Point)
	// Points lists the points in the Batch.
	Points() []*Point

	// Precision returns the currently set precision of this Batch.
	Precision() string
	// SetPrecision sets the precision of this batch; it returns an error
	// for precisions that time.ParseDuration does not accept.
	SetPrecision(s string) error

	// Database returns the currently set database of this Batch.
	Database() string
	// SetDatabase sets the database of this Batch.
	SetDatabase(s string)

	// WriteConsistency returns the currently set write consistency of this Batch.
	WriteConsistency() string
	// SetWriteConsistency sets the write consistency of this Batch.
	SetWriteConsistency(s string)

	// RetentionPolicy returns the currently set retention policy of this Batch.
	RetentionPolicy() string
	// SetRetentionPolicy sets the retention policy of this Batch.
	SetRetentionPolicy(s string)
}
||||
|
||||
// NewBatchPoints returns a BatchPoints interface based on the given config.
|
||||
func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) { |
||||
if conf.Precision == "" { |
||||
conf.Precision = "ns" |
||||
} |
||||
if _, err := time.ParseDuration("1" + conf.Precision); err != nil { |
||||
return nil, err |
||||
} |
||||
bp := &batchpoints{ |
||||
database: conf.Database, |
||||
precision: conf.Precision, |
||||
retentionPolicy: conf.RetentionPolicy, |
||||
writeConsistency: conf.WriteConsistency, |
||||
} |
||||
return bp, nil |
||||
} |
||||
|
||||
// batchpoints is the default BatchPoints implementation. It is a plain
// in-memory accumulator and is not safe for concurrent use.
type batchpoints struct {
	points           []*Point
	database         string
	precision        string
	retentionPolicy  string
	writeConsistency string
}
||||
|
||||
// AddPoint adds the given point to the Batch of points.
func (bp *batchpoints) AddPoint(p *Point) {
	bp.points = append(bp.points, p)
}

// AddPoints adds the given points to the Batch of points.
func (bp *batchpoints) AddPoints(ps []*Point) {
	bp.points = append(bp.points, ps...)
}

// Points lists the points in the Batch. The returned slice is the
// batch's internal storage, not a copy.
func (bp *batchpoints) Points() []*Point {
	return bp.points
}
||||
|
||||
// Precision returns the currently set precision of this Batch.
func (bp *batchpoints) Precision() string {
	return bp.precision
}

// Database returns the currently set database of this Batch.
func (bp *batchpoints) Database() string {
	return bp.database
}

// WriteConsistency returns the currently set write consistency of this Batch.
func (bp *batchpoints) WriteConsistency() string {
	return bp.writeConsistency
}

// RetentionPolicy returns the currently set retention policy of this Batch.
func (bp *batchpoints) RetentionPolicy() string {
	return bp.retentionPolicy
}
||||
|
||||
// SetPrecision sets the precision of this batch. The precision is
// validated via time.ParseDuration ("1" + p); an invalid unit returns
// an error and leaves the batch unchanged.
func (bp *batchpoints) SetPrecision(p string) error {
	if _, err := time.ParseDuration("1" + p); err != nil {
		return err
	}
	bp.precision = p
	return nil
}

// SetDatabase sets the database of this Batch.
func (bp *batchpoints) SetDatabase(db string) {
	bp.database = db
}

// SetWriteConsistency sets the write consistency of this Batch.
func (bp *batchpoints) SetWriteConsistency(wc string) {
	bp.writeConsistency = wc
}

// SetRetentionPolicy sets the retention policy of this Batch.
func (bp *batchpoints) SetRetentionPolicy(rp string) {
	bp.retentionPolicy = rp
}
||||
|
||||
// Point represents a single data point. It wraps a models.Point and
// exposes a read-only view of it.
type Point struct {
	pt models.Point
}
||||
|
||||
// NewPoint returns a point with the given timestamp. If a timestamp is not
|
||||
// given, then data is sent to the database without a timestamp, in which case
|
||||
// the server will assign local time upon reception. NOTE: it is recommended to
|
||||
// send data with a timestamp.
|
||||
func NewPoint( |
||||
name string, |
||||
tags map[string]string, |
||||
fields map[string]interface{}, |
||||
t ...time.Time, |
||||
) (*Point, error) { |
||||
var T time.Time |
||||
if len(t) > 0 { |
||||
T = t[0] |
||||
} |
||||
|
||||
pt, err := models.NewPoint(name, models.NewTags(tags), fields, T) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &Point{ |
||||
pt: pt, |
||||
}, nil |
||||
} |
||||
|
||||
// String returns a line-protocol string of the Point.
func (p *Point) String() string {
	return p.pt.String()
}
||||
|
||||
// PrecisionString returns a line-protocol string of the Point,
|
||||
// with the timestamp formatted for the given precision.
|
||||
func (p *Point) PrecisionString(precison string) string { |
||||
return p.pt.PrecisionString(precison) |
||||
} |
||||
|
||||
// Name returns the measurement name of the point.
func (p *Point) Name() string {
	return p.pt.Name()
}

// Tags returns the tags associated with the point.
func (p *Point) Tags() map[string]string {
	return p.pt.Tags().Map()
}

// Time returns the timestamp for the point.
func (p *Point) Time() time.Time {
	return p.pt.Time()
}

// UnixNano returns timestamp of the point in nanoseconds since Unix epoch.
func (p *Point) UnixNano() int64 {
	return p.pt.UnixNano()
}

// Fields returns the fields for the point.
func (p *Point) Fields() (map[string]interface{}, error) {
	return p.pt.Fields()
}
||||
|
||||
// NewPointFrom returns a point from the provided models.Point.
func NewPointFrom(pt models.Point) *Point {
	return &Point{pt: pt}
}
||||
|
||||
func (c *client) Write(bp BatchPoints) error { |
||||
var b bytes.Buffer |
||||
|
||||
for _, p := range bp.Points() { |
||||
if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil { |
||||
return err |
||||
} |
||||
|
||||
if err := b.WriteByte('\n'); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
u := c.url |
||||
u.Path = "write" |
||||
req, err := http.NewRequest("POST", u.String(), &b) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
req.Header.Set("Content-Type", "") |
||||
req.Header.Set("User-Agent", c.useragent) |
||||
if c.username != "" { |
||||
req.SetBasicAuth(c.username, c.password) |
||||
} |
||||
|
||||
params := req.URL.Query() |
||||
params.Set("db", bp.Database()) |
||||
params.Set("rp", bp.RetentionPolicy()) |
||||
params.Set("precision", bp.Precision()) |
||||
params.Set("consistency", bp.WriteConsistency()) |
||||
req.URL.RawQuery = params.Encode() |
||||
|
||||
resp, err := c.httpClient.Do(req) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
defer resp.Body.Close() |
||||
|
||||
body, err := ioutil.ReadAll(resp.Body) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { |
||||
var err = fmt.Errorf(string(body)) |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// Query defines a query to send to the server.
type Query struct {
	Command    string
	Database   string
	Precision  string
	Chunked    bool
	ChunkSize  int
	Parameters map[string]interface{}
}

// NewQuery returns a query object.
// The database and precision arguments can be empty strings if they are not needed for the query.
func NewQuery(command, database, precision string) Query {
	// Delegate to NewQueryWithParameters with a fresh, empty parameter map.
	return NewQueryWithParameters(command, database, precision, make(map[string]interface{}))
}

// NewQueryWithParameters returns a query object.
// The database and precision arguments can be empty strings if they are not needed for the query.
// parameters is a map of the parameter names used in the command to their values.
func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query {
	return Query{
		Command:    command,
		Database:   database,
		Precision:  precision,
		Parameters: parameters,
	}
}
||||
|
||||
// Response represents a list of statement results.
type Response struct {
	Results []Result
	// Err carries a top-level error message from the server, if any.
	Err string `json:"error,omitempty"`
}
||||
|
||||
// Error returns the first error from any statement.
|
||||
// It returns nil if no errors occurred on any statements.
|
||||
func (r *Response) Error() error { |
||||
if r.Err != "" { |
||||
return fmt.Errorf(r.Err) |
||||
} |
||||
for _, result := range r.Results { |
||||
if result.Err != "" { |
||||
return fmt.Errorf(result.Err) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// Message represents a user message.
type Message struct {
	Level string
	Text  string
}

// Result represents a resultset returned from a single statement.
type Result struct {
	Series   []models.Row
	Messages []*Message
	// Err carries a per-statement error message from the server, if any.
	Err string `json:"error,omitempty"`
}
||||
|
||||
// Query sends a command to the server and returns the Response.
//
// Chunked queries are re-assembled into a single Response by concatenating
// each chunk's Results; assembly stops at the first chunk carrying an error.
// Non-chunked responses are decoded in one pass with json.Number preserved.
func (c *client) Query(q Query) (*Response, error) {
	u := c.url
	u.Path = "query"

	jsonParameters, err := json.Marshal(q.Parameters)

	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest("POST", u.String(), nil)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Content-Type", "")
	req.Header.Set("User-Agent", c.useragent)

	if c.username != "" {
		req.SetBasicAuth(c.username, c.password)
	}

	// The query, database and JSON-encoded bind parameters travel as URL
	// query parameters, not as a request body.
	params := req.URL.Query()
	params.Set("q", q.Command)
	params.Set("db", q.Database)
	params.Set("params", string(jsonParameters))
	if q.Chunked {
		params.Set("chunked", "true")
		if q.ChunkSize > 0 {
			params.Set("chunk_size", strconv.Itoa(q.ChunkSize))
		}
	}

	if q.Precision != "" {
		params.Set("epoch", q.Precision)
	}
	req.URL.RawQuery = params.Encode()

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var response Response
	if q.Chunked {
		cr := NewChunkedResponse(resp.Body)
		for {
			r, err := cr.NextResponse()
			if err != nil {
				// If we got an error while decoding the response, send that back.
				return nil, err
			}

			// A nil response with no error signals end of stream.
			if r == nil {
				break
			}

			response.Results = append(response.Results, r.Results...)
			if r.Err != "" {
				response.Err = r.Err
				break
			}
		}
	} else {
		// UseNumber keeps numeric values as json.Number instead of float64,
		// avoiding precision loss on large integers.
		dec := json.NewDecoder(resp.Body)
		dec.UseNumber()
		decErr := dec.Decode(&response)

		// ignore this error if we got an invalid status code
		if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
			decErr = nil
		}
		// If we got a valid decode error, send that back
		if decErr != nil {
			return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr)
		}
	}
	// If we don't have an error in our json response, and didn't get statusOK
	// then send back an error
	if resp.StatusCode != http.StatusOK && response.Error() == nil {
		return &response, fmt.Errorf("received status code %d from server",
			resp.StatusCode)
	}
	return &response, nil
}
||||
|
||||
// duplexReader reads responses and writes it to another writer while
|
||||
// satisfying the reader interface.
|
||||
type duplexReader struct { |
||||
r io.Reader |
||||
w io.Writer |
||||
} |
||||
|
||||
func (r *duplexReader) Read(p []byte) (n int, err error) { |
||||
n, err = r.r.Read(p) |
||||
if err == nil { |
||||
r.w.Write(p[:n]) |
||||
} |
||||
return n, err |
||||
} |
||||
|
||||
// ChunkedResponse represents a response from the server that
// uses chunking to stream the output.
type ChunkedResponse struct {
	dec *json.Decoder
	// duplex tees everything the decoder reads into buf so that, on a
	// decode failure, the raw server output can be reported verbatim.
	duplex *duplexReader
	buf    bytes.Buffer
}
||||
|
||||
// NewChunkedResponse reads a stream and produces responses from the stream.
|
||||
func NewChunkedResponse(r io.Reader) *ChunkedResponse { |
||||
resp := &ChunkedResponse{} |
||||
resp.duplex = &duplexReader{r: r, w: &resp.buf} |
||||
resp.dec = json.NewDecoder(resp.duplex) |
||||
resp.dec.UseNumber() |
||||
return resp |
||||
} |
||||
|
||||
// NextResponse reads the next line of the stream and returns a response.
// It returns (nil, nil) at end of stream.
func (r *ChunkedResponse) NextResponse() (*Response, error) {
	var response Response

	if err := r.dec.Decode(&response); err != nil {
		if err == io.EOF {
			// Clean end of stream.
			return nil, nil
		}
		// A decoding error happened. This probably means the server crashed
		// and sent a last-ditch error message to us. Ensure we have read the
		// entirety of the connection to get any remaining error text.
		io.Copy(ioutil.Discard, r.duplex)
		return nil, errors.New(strings.TrimSpace(r.buf.String()))
	}

	// Drop the mirrored bytes of the successfully-decoded chunk so buf
	// only ever holds the raw text of a failed decode.
	r.buf.Reset()
	return &response, nil
}
||||
@ -0,0 +1,112 @@ |
||||
package client |
||||
|
||||
import ( |
||||
"fmt" |
||||
"io" |
||||
"net" |
||||
"time" |
||||
) |
||||
|
||||
const (
	// UDPPayloadSize is a reasonable default payload size for UDP packets that
	// could be travelling over the internet.
	UDPPayloadSize = 512
)
||||
|
||||
// UDPConfig is the config data needed to create a UDP Client.
type UDPConfig struct {
	// Addr should be of the form "host:port"
	// or "[ipv6-host%zone]:port".
	Addr string

	// PayloadSize is the maximum size of a UDP client message, optional.
	// Tune this based on your network. Defaults to UDPPayloadSize.
	PayloadSize int
}
||||
|
||||
// NewUDPClient returns a client interface for writing to an InfluxDB UDP
|
||||
// service from the given config.
|
||||
func NewUDPClient(conf UDPConfig) (Client, error) { |
||||
var udpAddr *net.UDPAddr |
||||
udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
conn, err := net.DialUDP("udp", nil, udpAddr) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
payloadSize := conf.PayloadSize |
||||
if payloadSize == 0 { |
||||
payloadSize = UDPPayloadSize |
||||
} |
||||
|
||||
return &udpclient{ |
||||
conn: conn, |
||||
payloadSize: payloadSize, |
||||
}, nil |
||||
} |
||||
|
||||
// Close releases the udpclient's resources by closing the underlying
// connection.
func (uc *udpclient) Close() error {
	return uc.conn.Close()
}

// udpclient implements Client over a fire-and-forget UDP connection.
type udpclient struct {
	conn        io.WriteCloser
	// payloadSize is the maximum number of bytes packed into one datagram.
	payloadSize int
}
||||
|
||||
// Write serializes the batch to line protocol and sends it over UDP,
// packing as many points as fit into each payloadSize-limited datagram.
// Oversized points with a timestamp are split; a flush failure mid-batch
// is remembered and returned after the remaining points are attempted.
func (uc *udpclient) Write(bp BatchPoints) error {
	var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed
	// Round point timestamps to the batch precision before sending.
	var d, _ = time.ParseDuration("1" + bp.Precision())

	var delayedError error

	// checkBuffer flushes the accumulated datagram if adding n more bytes
	// would exceed the payload limit; send errors are deferred, not fatal.
	var checkBuffer = func(n int) {
		if len(b) > 0 && len(b)+n > uc.payloadSize {
			if _, err := uc.conn.Write(b); err != nil {
				delayedError = err
			}
			b = b[:0]
		}
	}

	for _, p := range bp.Points() {
		p.pt.Round(d)
		pointSize := p.pt.StringSize() + 1 // include newline in size
		//point := p.pt.RoundedString(d) + "\n"

		checkBuffer(pointSize)

		// A point that fits (or has no timestamp, so cannot be split
		// meaningfully) is appended whole.
		if p.Time().IsZero() || pointSize <= uc.payloadSize {
			b = p.pt.AppendString(b)
			b = append(b, '\n')
			continue
		}

		points := p.pt.Split(uc.payloadSize - 1) // account for newline character
		for _, sp := range points {
			checkBuffer(sp.StringSize() + 1)
			b = sp.AppendString(b)
			b = append(b, '\n')
		}
	}

	// Flush whatever remains; a failure here takes precedence over any
	// earlier deferred error.
	if len(b) > 0 {
		if _, err := uc.conn.Write(b); err != nil {
			return err
		}
	}
	return delayedError
}
||||
|
||||
func (uc *udpclient) Query(q Query) (*Response, error) { |
||||
return nil, fmt.Errorf("Querying via UDP is not supported") |
||||
} |
||||
|
||||
func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { |
||||
return 0, "", nil |
||||
} |
||||
@ -0,0 +1,48 @@ |
||||
package models |
||||
|
||||
import ( |
||||
"errors" |
||||
"strings" |
||||
) |
||||
|
||||
// ConsistencyLevel represent a required replication criteria before a write can
// be returned as successful.
//
// The consistency level is handled in open-source InfluxDB but only applicable to clusters.
type ConsistencyLevel int

const (
	// ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet.
	ConsistencyLevelAny ConsistencyLevel = iota

	// ConsistencyLevelOne requires at least one data node acknowledged a write.
	ConsistencyLevelOne

	// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write.
	ConsistencyLevelQuorum

	// ConsistencyLevelAll requires all data nodes to acknowledge a write.
	ConsistencyLevelAll
)

var (
	// ErrInvalidConsistencyLevel is returned when parsing the string version
	// of a consistency level.
	ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
)

// consistencyLevels maps the lowercase textual form of each level to its
// constant, for use by ParseConsistencyLevel.
var consistencyLevels = map[string]ConsistencyLevel{
	"any":    ConsistencyLevelAny,
	"one":    ConsistencyLevelOne,
	"quorum": ConsistencyLevelQuorum,
	"all":    ConsistencyLevelAll,
}

// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const.
// Matching is case-insensitive; unknown strings yield ErrInvalidConsistencyLevel.
func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
	if l, ok := consistencyLevels[strings.ToLower(level)]; ok {
		return l, nil
	}
	return 0, ErrInvalidConsistencyLevel
}
||||
@ -0,0 +1,32 @@ |
||||
package models // import "github.com/influxdata/influxdb/models"
|
||||
|
||||
// FNV-1a constants, taken from stdlib hash/fnv/fnv.go.
const (
	prime64  = 1099511628211
	offset64 = 14695981039346656037
)

// InlineFNV64a is an alloc-free port of the standard library's fnv64a.
// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function.
type InlineFNV64a uint64

// NewInlineFNV64a returns a new instance of InlineFNV64a, seeded with the
// standard FNV offset basis.
func NewInlineFNV64a() InlineFNV64a {
	return offset64
}

// Write adds data to the running hash. It never fails and always reports
// the full length of data as written, satisfying io.Writer.
func (s *InlineFNV64a) Write(data []byte) (int, error) {
	h := uint64(*s)
	for i := 0; i < len(data); i++ {
		// FNV-1a: XOR the byte in first, then multiply by the prime.
		h = (h ^ uint64(data[i])) * prime64
	}
	*s = InlineFNV64a(h)
	return len(data), nil
}

// Sum64 returns the uint64 of the current resulting hash.
func (s *InlineFNV64a) Sum64() uint64 {
	return uint64(*s)
}
||||
@ -0,0 +1,38 @@ |
||||
package models // import "github.com/influxdata/influxdb/models"
|
||||
|
||||
import ( |
||||
"reflect" |
||||
"strconv" |
||||
"unsafe" |
||||
) |
||||
|
||||
// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
|
||||
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) { |
||||
s := unsafeBytesToString(b) |
||||
return strconv.ParseInt(s, base, bitSize) |
||||
} |
||||
|
||||
// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
|
||||
func parseFloatBytes(b []byte, bitSize int) (float64, error) { |
||||
s := unsafeBytesToString(b) |
||||
return strconv.ParseFloat(s, bitSize) |
||||
} |
||||
|
||||
// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
|
||||
func parseBoolBytes(b []byte) (bool, error) { |
||||
return strconv.ParseBool(unsafeBytesToString(b)) |
||||
} |
||||
|
||||
// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
//
// The caller must not mutate b while the returned string is alive, since
// both share the same backing memory.
//
// NOTE(review): reflect.SliceHeader/StringHeader are deprecated in newer Go
// releases in favour of unsafe.String/unsafe.Slice — worth revisiting if the
// toolchain is upgraded; confirm against the module's minimum Go version.
func unsafeBytesToString(in []byte) string {
	src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
	dst := reflect.StringHeader{
		Data: src.Data,
		Len:  src.Len,
	}
	s := *(*string)(unsafe.Pointer(&dst))
	return s
}
||||
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,62 @@ |
||||
package models |
||||
|
||||
import ( |
||||
"sort" |
||||
) |
||||
|
||||
// Row represents a single row returned from the execution of a statement.
type Row struct {
	Name    string            `json:"name,omitempty"`
	Tags    map[string]string `json:"tags,omitempty"`
	Columns []string          `json:"columns,omitempty"`
	Values  [][]interface{}   `json:"values,omitempty"`
	// Partial indicates the row was truncated by the server and more data
	// follows in a subsequent chunk.
	Partial bool `json:"partial,omitempty"`
}
||||
|
||||
// SameSeries returns true if r contains values for the same series as o,
// i.e. both the measurement name and the tag set match.
func (r *Row) SameSeries(o *Row) bool {
	return r.tagsHash() == o.tagsHash() && r.Name == o.Name
}
||||
|
||||
// tagsHash returns a hash of tag key/value pairs.
|
||||
func (r *Row) tagsHash() uint64 { |
||||
h := NewInlineFNV64a() |
||||
keys := r.tagsKeys() |
||||
for _, k := range keys { |
||||
h.Write([]byte(k)) |
||||
h.Write([]byte(r.Tags[k])) |
||||
} |
||||
return h.Sum64() |
||||
} |
||||
|
||||
// tagKeys returns a sorted list of tag keys.
|
||||
func (r *Row) tagsKeys() []string { |
||||
a := make([]string, 0, len(r.Tags)) |
||||
for k := range r.Tags { |
||||
a = append(a, k) |
||||
} |
||||
sort.Strings(a) |
||||
return a |
||||
} |
||||
|
||||
// Rows represents a collection of rows. Rows implements sort.Interface.
type Rows []*Row

// Len implements sort.Interface.
func (p Rows) Len() int { return len(p) }

// Less implements sort.Interface.
func (p Rows) Less(i, j int) bool {
	// Sort by name first.
	if p[i].Name != p[j].Name {
		return p[i].Name < p[j].Name
	}

	// Sort by tag set hash. Tags don't have a meaningful sort order so we
	// just compute a hash and sort by that instead. This allows the tests
	// to receive rows in a predictable order every time.
	return p[i].tagsHash() < p[j].tagsHash()
}

// Swap implements sort.Interface.
func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
||||
@ -0,0 +1,42 @@ |
||||
package models |
||||
|
||||
// Statistic is the representation of a statistic used by the monitoring service.
type Statistic struct {
	Name   string                 `json:"name"`
	Tags   map[string]string      `json:"tags"`
	Values map[string]interface{} `json:"values"`
}

// NewStatistic returns an initialized Statistic with empty (non-nil)
// Tags and Values maps.
func NewStatistic(name string) Statistic {
	s := Statistic{Name: name}
	s.Tags = map[string]string{}
	s.Values = map[string]interface{}{}
	return s
}
||||
|
||||
// StatisticTags is a map that can be merged with others without causing
// mutations to either map.
type StatisticTags map[string]string

// Merge creates a new map containing the merged contents of tags and t.
// If both tags and the receiver map contain the same key, the value in tags
// is used in the resulting map.
//
// Merge always returns a usable map.
func (t StatisticTags) Merge(tags map[string]string) map[string]string {
	merged := make(map[string]string, len(tags)+len(t))
	// Receiver values go in first ...
	for k, v := range t {
		merged[k] = v
	}
	// ... so that values from tags win on key collisions.
	for k, v := range tags {
		merged[k] = v
	}
	return merged
}
||||
@ -0,0 +1,74 @@ |
||||
package models |
||||
|
||||
// Helper time methods since parsing time can easily overflow and we only support a
|
||||
// specific time range.
|
||||
|
||||
import ( |
||||
"fmt" |
||||
"math" |
||||
"time" |
||||
) |
||||
|
||||
const (
	// MinNanoTime is the minimum time that can be represented.
	//
	// 1677-09-21 00:12:43.145224194 +0000 UTC
	//
	// The two lowest minimum integers are used as sentinel values. The
	// minimum value needs to be used as a value lower than any other value for
	// comparisons and another separate value is needed to act as a sentinel
	// default value that is unusable by the user, but usable internally.
	// Because these two values need to be used for a special purpose, we do
	// not allow users to write points at these two times.
	MinNanoTime = int64(math.MinInt64) + 2

	// MaxNanoTime is the maximum time that can be represented.
	//
	// 2262-04-11 23:47:16.854775806 +0000 UTC
	//
	// The highest time represented by a nanosecond needs to be used for an
	// exclusive range in the shard group, so the maximum time needs to be one
	// less than the possible maximum number of nanoseconds representable by an
	// int64 so that we don't lose a point at that one time.
	MaxNanoTime = int64(math.MaxInt64) - 1
)

var (
	// minNanoTime and maxNanoTime are the time.Time equivalents of the
	// const bounds above, precomputed for CheckTime comparisons.
	minNanoTime = time.Unix(0, MinNanoTime).UTC()
	maxNanoTime = time.Unix(0, MaxNanoTime).UTC()

	// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
	ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime)
)
||||
|
||||
// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
// supported range.
//
// The timestamp is scaled to nanoseconds by the precision's multiplier;
// multiplication overflow and out-of-range results both yield
// ErrTimeOutOfRange.
func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
	mult := GetPrecisionMultiplier(precision)
	if t, ok := safeSignedMult(timestamp, mult); ok {
		tme := time.Unix(0, t).UTC()
		return tme, CheckTime(tme)
	}

	return time.Time{}, ErrTimeOutOfRange
}
||||
|
||||
// CheckTime checks that a time is within the safe range.
|
||||
func CheckTime(t time.Time) error { |
||||
if t.Before(minNanoTime) || t.After(maxNanoTime) { |
||||
return ErrTimeOutOfRange |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// Perform the multiplication and check to make sure it didn't overflow.
|
||||
func safeSignedMult(a, b int64) (int64, bool) { |
||||
if a == 0 || b == 0 || a == 1 || b == 1 { |
||||
return a * b, true |
||||
} |
||||
if a == MinNanoTime || b == MaxNanoTime { |
||||
return 0, false |
||||
} |
||||
c := a * b |
||||
return c, c/b == a |
||||
} |
||||
@ -0,0 +1,111 @@ |
||||
// Package escape contains utilities for escaping parts of InfluxQL
|
||||
// and InfluxDB line protocol.
|
||||
package escape // import "github.com/influxdata/influxdb/pkg/escape"
|
||||
|
||||
import ( |
||||
"bytes" |
||||
"strings" |
||||
) |
||||
|
||||
// Codes is a map of bytes to be escaped.
var Codes = map[byte][]byte{
	',': []byte(`\,`),
	'"': []byte(`\"`),
	' ': []byte(`\ `),
	'=': []byte(`\=`),
}

// Bytes escapes characters on the input slice, as defined by Codes.
// The replacements are independent of one another, so the (random) map
// iteration order does not affect the result.
func Bytes(in []byte) []byte {
	out := in
	for c, escaped := range Codes {
		out = bytes.Replace(out, []byte{c}, escaped, -1)
	}
	return out
}
||||
|
||||
const escapeChars = `," =`

// IsEscaped returns whether b has any escaped characters,
// i.e. whether b seems to have been processed by Bytes.
func IsEscaped(b []byte) bool {
	for {
		idx := bytes.IndexByte(b, '\\')
		if idx < 0 {
			// No backslash left anywhere, so no escape sequence.
			return false
		}
		// A backslash followed by one of escapeChars is an escape sequence;
		// a trailing backslash or any other following byte is not.
		if next := idx + 1; next < len(b) && strings.IndexByte(escapeChars, b[next]) >= 0 {
			return true
		}
		b = b[idx+1:]
	}
}
||||
|
||||
// AppendUnescaped appends the unescaped version of src to dst
// and returns the resulting slice.
func AppendUnescaped(dst, src []byte) []byte {
	// pos is the offset in src up to which bytes have been scanned but not
	// yet copied out; src[:pos+next] is flushed whenever an escape is found.
	var pos int
	for len(src) > 0 {
		next := bytes.IndexByte(src[pos:], '\\')
		if next < 0 || pos+next+1 >= len(src) {
			// No further backslash, or only a trailing one: the remainder
			// of src is copied verbatim.
			return append(dst, src...)
		}

		// NOTE(review): the first operand is always true here — the
		// pos+next+1 >= len(src) case returned above — so only the
		// escapeChars membership test is decisive.
		if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 {
			if pos+next > 0 {
				// Flush everything before the backslash.
				dst = append(dst, src[:pos+next]...)
			}
			// Drop the backslash; the escaped character itself is emitted
			// on a later iteration (or by the final verbatim copy).
			src = src[pos+next+1:]
			pos = 0
		} else {
			// Not an escape sequence: keep the backslash and scan on.
			pos += next + 1
		}
	}

	return dst
}
||||
|
||||
// Unescape returns a new slice containing the unescaped version of in.
// When in contains no backslash it is returned as-is (aliased, not copied);
// an empty input yields nil.
func Unescape(in []byte) []byte {
	if len(in) == 0 {
		return nil
	}
	if bytes.IndexByte(in, '\\') == -1 {
		return in
	}

	// The output is at most as long as the input.
	out := make([]byte, 0, len(in))
	for i := 0; i < len(in); {
		if in[i] == '\\' && i+1 < len(in) {
			switch in[i+1] {
			case ',', '"', ' ', '=':
				// Recognized escape: emit the escaped byte, skip both.
				out = append(out, in[i+1])
				i += 2
				continue
			}
		}
		// Ordinary byte (or an unrecognized/trailing backslash): copy it.
		out = append(out, in[i])
		i++
	}
	return out
}
||||
@ -0,0 +1,21 @@ |
||||
package escape |
||||
|
||||
import "strings" |
||||
|
||||
var (
	escaper   = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
	unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)
)

// UnescapeString returns unescaped version of in.
func UnescapeString(in string) string {
	// Fast path: a string with no backslash has nothing to unescape.
	if !strings.ContainsRune(in, '\\') {
		return in
	}
	return unescaper.Replace(in)
}

// String returns the escaped version of in.
func String(in string) string {
	return escaper.Replace(in)
}
||||
@ -1,180 +0,0 @@ |
||||
package client |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"net/http" |
||||
"net/url" |
||||
"time" |
||||
|
||||
"github.com/influxdb/influxdb/tsdb" |
||||
) |
||||
|
||||
const (
	// DefaultTimeout is the default connection timeout used to connect to an
	// InfluxDB instance. A value of 0 means no timeout, following the
	// semantics of http.Client.Timeout (see NewClient).
	DefaultTimeout = 0
)
||||
|
||||
// Config is used to specify what server to connect to.
type Config struct {
	// URL of the server to connect to.
	URL url.URL
	// Username and Password are optional; when Username is non-empty they
	// are sent via HTTP basic auth.
	Username string
	Password string
	// UserAgent is sent as the HTTP User-Agent header; if not provided it
	// defaults to "InfluxDBClient".
	UserAgent string
	// Timeout for HTTP requests; if not provided it defaults to 0 (no timeout).
	Timeout time.Duration
	// Precision is carried into Client.precision; presumably the default
	// timestamp precision for requests — not consulted by Write in this file.
	Precision string
}
||||
|
||||
// NewConfig will create a config to be used in connecting to the client
|
||||
func NewConfig() Config { |
||||
return Config{ |
||||
Timeout: DefaultTimeout, |
||||
} |
||||
} |
||||
|
||||
// Client is used to make calls to the server.
type Client struct {
	// url is the base server URL; Write sets its Path to "write".
	url url.URL
	// username and password are sent via basic auth when username is non-empty.
	username string
	password string
	// httpClient carries the timeout configured in NewClient.
	httpClient *http.Client
	// userAgent is sent as the User-Agent header ("InfluxDBClient" by default).
	userAgent string
	// precision is copied from Config.Precision; not used by Write in this file.
	precision string
}
||||
|
||||
// Write-consistency levels accepted by the server's "consistency" query
// parameter (see Client.Write).
const (
	// ConsistencyOne is the "one" consistency level.
	ConsistencyOne = "one"
	// ConsistencyAll is the "all" consistency level.
	ConsistencyAll = "all"
	// ConsistencyQuorum is the "quorum" consistency level.
	ConsistencyQuorum = "quorum"
	// ConsistencyAny is the "any" consistency level.
	ConsistencyAny = "any"
)
||||
|
||||
// NewClient will instantiate and return a connected client to issue commands to the server.
|
||||
func NewClient(c Config) (*Client, error) { |
||||
client := Client{ |
||||
url: c.URL, |
||||
username: c.Username, |
||||
password: c.Password, |
||||
httpClient: &http.Client{Timeout: c.Timeout}, |
||||
userAgent: c.UserAgent, |
||||
precision: c.Precision, |
||||
} |
||||
if client.userAgent == "" { |
||||
client.userAgent = "InfluxDBClient" |
||||
} |
||||
return &client, nil |
||||
} |
||||
|
||||
// Write takes BatchPoints and allows for writing of multiple points with defaults
|
||||
// If successful, error is nil and Response is nil
|
||||
// If an error occurs, Response may contain additional information if populated.
|
||||
func (c *Client) Write(bp BatchPoints) (*Response, error) { |
||||
u := c.url |
||||
u.Path = "write" |
||||
|
||||
var b bytes.Buffer |
||||
for _, p := range bp.Points { |
||||
if p.Raw != "" { |
||||
if _, err := b.WriteString(p.Raw); err != nil { |
||||
return nil, err |
||||
} |
||||
} else { |
||||
for k, v := range bp.Tags { |
||||
if p.Tags == nil { |
||||
p.Tags = make(map[string]string, len(bp.Tags)) |
||||
} |
||||
p.Tags[k] = v |
||||
} |
||||
|
||||
if _, err := b.WriteString(p.MarshalString()); err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
if err := b.WriteByte('\n'); err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
|
||||
req, err := http.NewRequest("POST", u.String(), &b) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
req.Header.Set("Content-Type", "") |
||||
req.Header.Set("User-Agent", c.userAgent) |
||||
if c.username != "" { |
||||
req.SetBasicAuth(c.username, c.password) |
||||
} |
||||
params := req.URL.Query() |
||||
params.Set("db", bp.Database) |
||||
params.Set("rp", bp.RetentionPolicy) |
||||
params.Set("precision", bp.Precision) |
||||
params.Set("consistency", bp.WriteConsistency) |
||||
req.URL.RawQuery = params.Encode() |
||||
|
||||
resp, err := c.httpClient.Do(req) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer resp.Body.Close() |
||||
|
||||
var response Response |
||||
body, err := ioutil.ReadAll(resp.Body) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { |
||||
var err = fmt.Errorf(string(body)) |
||||
response.Err = err |
||||
return &response, err |
||||
} |
||||
|
||||
return nil, nil |
||||
} |
||||
|
||||
// Structs

// Response represents a list of statement results.
type Response struct {
	// Err holds the top-level error extracted from a non-2xx HTTP response
	// body (see Write).
	Err error
}
||||
|
||||
// Point defines the fields that will be written to the database
// Measurement, Time, and Fields are required
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h
type Point struct {
	// Measurement is the series/measurement name.
	Measurement string
	// Tags are optional key/value pairs; Write merges batch-level tags into
	// this map.
	Tags map[string]string
	// Time is the point's timestamp.
	Time time.Time
	// Fields holds the field key/value pairs to write.
	Fields map[string]interface{}
	// Precision of Time when given as an integer epoch (n, u, ms, s, m, h).
	Precision string
	// Raw, when non-empty, is a pre-marshaled line that Write sends verbatim
	// instead of marshaling the other fields.
	Raw string
}
||||
|
||||
// MarshalString renders the point by delegating to
// tsdb.NewPoint(...).String() — presumably InfluxDB line protocol; verify
// against tsdb.Point. Note that p.Precision and p.Raw are not consulted
// here: Raw is handled by Client.Write before marshaling.
func (p *Point) MarshalString() string {
	return tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time).String()
}
||||
|
||||
// BatchPoints is used to send batched data in a single write.
// Database and Points are required
// If no retention policy is specified, it will use the databases default retention policy.
// If tags are specified, they will be "merged" with all points. If a point already has that tag, it is ignored.
// If time is specified, it will be applied to any point with an empty time.
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h
//
// NOTE(review): Client.Write in this file does not actually apply Time to
// points with an empty time as documented above — confirm against callers.
type BatchPoints struct {
	Points          []Point           `json:"points,omitempty"`
	Database        string            `json:"database,omitempty"`
	RetentionPolicy string            `json:"retentionPolicy,omitempty"`
	Tags            map[string]string `json:"tags,omitempty"`
	Time            time.Time         `json:"time,omitempty"`
	Precision       string            `json:"precision,omitempty"`
	// WriteConsistency is sent as the "consistency" query parameter and is
	// excluded from JSON marshaling.
	WriteConsistency string `json:"-"`
}
||||
File diff suppressed because it is too large
Load Diff
Loading…
Reference in new issue