fix(deps): update module github.com/ncw/swift to v2 (#13951)

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Paul Rogers <paul.rogers@grafana.com>
renovate[bot] committed via GitHub, 9 months ago (commit 246a1dfbe2, parent 96b5c79e77)
Files changed (number of changed lines in parentheses):
  1. go.mod (3)
  2. go.sum (2)
  3. pkg/storage/chunk/client/openstack/swift_object_client.go (70)
  4. vendor/github.com/ncw/swift/v2/.gitignore (4)
  5. vendor/github.com/ncw/swift/v2/.golangci.yml (56)
  6. vendor/github.com/ncw/swift/v2/COPYING (20)
  7. vendor/github.com/ncw/swift/v2/README.md (172)
  8. vendor/github.com/ncw/swift/v2/auth.go (336)
  9. vendor/github.com/ncw/swift/v2/auth_v3.go (300)
  10. vendor/github.com/ncw/swift/v2/compatibility_1_0.go (29)
  11. vendor/github.com/ncw/swift/v2/compatibility_1_1.go (25)
  12. vendor/github.com/ncw/swift/v2/compatibility_1_6.go (24)
  13. vendor/github.com/ncw/swift/v2/compatibility_not_1_6.go (14)
  14. vendor/github.com/ncw/swift/v2/dlo.go (158)
  15. vendor/github.com/ncw/swift/v2/doc.go (18)
  16. vendor/github.com/ncw/swift/v2/integration_test.sh (57)
  17. vendor/github.com/ncw/swift/v2/largeobjects.go (487)
  18. vendor/github.com/ncw/swift/v2/meta.go (174)
  19. vendor/github.com/ncw/swift/v2/notes.txt (55)
  20. vendor/github.com/ncw/swift/v2/slo.go (185)
  21. vendor/github.com/ncw/swift/v2/swift.go (2347)
  22. vendor/github.com/ncw/swift/v2/timeout_reader.go (58)
  23. vendor/github.com/ncw/swift/v2/travis_realserver.sh (22)
  24. vendor/github.com/ncw/swift/v2/watchdog_reader.go (55)
  25. vendor/modules.txt (3)

@@ -72,7 +72,6 @@ require (
github.com/mitchellh/mapstructure v1.5.0
github.com/modern-go/reflect2 v1.0.2
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
github.com/ncw/swift v1.0.53
github.com/oklog/run v1.1.0
github.com/oklog/ulid v1.3.1
github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e
@@ -132,6 +131,7 @@ require (
github.com/heroku/x v0.0.61
github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
github.com/ncw/swift/v2 v2.0.2
github.com/prometheus/alertmanager v0.27.0
github.com/prometheus/common/sigv4 v0.1.0
github.com/richardartoul/molecule v1.0.0
@@ -169,6 +169,7 @@ require (
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/ncw/swift v1.0.53 // indirect
github.com/pires/go-proxyproto v0.7.0 // indirect
github.com/pkg/xattr v0.4.10 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect

@@ -1497,6 +1497,8 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks=
github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk=
github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ=
github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=

@@ -9,7 +9,7 @@ import (
"net/http"
"time"
"github.com/ncw/swift"
swift "github.com/ncw/swift/v2"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -61,7 +61,7 @@ func NewSwiftObjectClient(cfg SwiftConfig, hedgingCfg hedging.Config) (*SwiftObj
return nil, err
}
// Ensure the container is created, no error is returned if it already exists.
if err := c.ContainerCreate(cfg.ContainerName, nil); err != nil {
if err := c.ContainerCreate(context.Background(), cfg.Config.ContainerName, nil); err != nil {
return nil, err
}
hedging, err := createConnection(cfg, hedgingCfg, true)
@@ -78,30 +78,30 @@ func NewSwiftObjectClient(cfg SwiftConfig, hedgingCfg hedging.Config) (*SwiftObj
func createConnection(cfg SwiftConfig, hedgingCfg hedging.Config, hedging bool) (*swift.Connection, error) {
// Create a connection
c := &swift.Connection{
AuthVersion: cfg.AuthVersion,
AuthUrl: cfg.AuthURL,
Internal: cfg.Internal,
ApiKey: cfg.Password,
UserName: cfg.Username,
UserId: cfg.UserID,
Retries: cfg.MaxRetries,
ConnectTimeout: cfg.ConnectTimeout,
Timeout: cfg.RequestTimeout,
TenantId: cfg.ProjectID,
Tenant: cfg.ProjectName,
TenantDomain: cfg.ProjectDomainName,
TenantDomainId: cfg.ProjectDomainID,
Domain: cfg.DomainName,
DomainId: cfg.DomainID,
Region: cfg.RegionName,
AuthVersion: cfg.Config.AuthVersion,
AuthUrl: cfg.Config.AuthURL,
Internal: cfg.Config.Internal,
ApiKey: cfg.Config.Password,
UserName: cfg.Config.Username,
UserId: cfg.Config.UserID,
Retries: cfg.Config.MaxRetries,
ConnectTimeout: cfg.Config.ConnectTimeout,
Timeout: cfg.Config.RequestTimeout,
TenantId: cfg.Config.ProjectID,
Tenant: cfg.Config.ProjectName,
TenantDomain: cfg.Config.ProjectDomainName,
TenantDomainId: cfg.Config.ProjectDomainID,
Domain: cfg.Config.DomainName,
DomainId: cfg.Config.DomainID,
Region: cfg.Config.RegionName,
Transport: defaultTransport,
}
switch {
case cfg.UserDomainName != "":
c.Domain = cfg.UserDomainName
case cfg.UserDomainID != "":
c.DomainId = cfg.UserDomainID
case cfg.Config.UserDomainName != "":
c.Domain = cfg.Config.UserDomainName
case cfg.Config.UserDomainID != "":
c.DomainId = cfg.Config.UserDomainID
}
if hedging {
var err error
@@ -111,7 +111,7 @@ func createConnection(cfg SwiftConfig, hedgingCfg hedging.Config, hedging bool)
}
}
err := c.Authenticate()
err := c.Authenticate(context.TODO())
if err != nil {
return nil, err
}
@@ -124,8 +124,8 @@ func (s *SwiftObjectClient) Stop() {
s.hedgingConn.UnAuthenticate()
}
func (s *SwiftObjectClient) ObjectExists(_ context.Context, objectKey string) (bool, error) {
_, _, err := s.hedgingConn.Object(s.cfg.ContainerName, objectKey)
func (s *SwiftObjectClient) ObjectExists(ctx context.Context, objectKey string) (bool, error) {
_, _, err := s.hedgingConn.Object(ctx, s.cfg.Config.ContainerName, objectKey)
if err != nil {
return false, err
}
@@ -134,9 +134,9 @@ func (s *SwiftObjectClient) ObjectExists(_ context.Context, objectKey string) (b
}
// GetObject returns a reader and the size for the specified object key from the configured swift container.
func (s *SwiftObjectClient) GetObject(_ context.Context, objectKey string) (io.ReadCloser, int64, error) {
func (s *SwiftObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) {
var buf bytes.Buffer
_, err := s.hedgingConn.ObjectGet(s.cfg.ContainerName, objectKey, &buf, false, nil)
_, err := s.hedgingConn.ObjectGet(ctx, s.cfg.Config.ContainerName, objectKey, &buf, false, nil)
if err != nil {
return nil, 0, err
}
@@ -145,12 +145,12 @@ func (s *SwiftObjectClient) GetObject(_ context.Context, objectKey string) (io.R
}
// GetObjectRange returns a reader for the requested byte range of the specified object key from the configured swift container.
func (s *SwiftObjectClient) GetObjectRange(_ context.Context, objectKey string, offset, length int64) (io.ReadCloser, error) {
func (s *SwiftObjectClient) GetObjectRange(ctx context.Context, objectKey string, offset, length int64) (io.ReadCloser, error) {
var buf bytes.Buffer
h := swift.Headers{
"Range": fmt.Sprintf("bytes=%d-%d", offset, offset+length-1),
}
_, err := s.hedgingConn.ObjectGet(s.cfg.ContainerName, objectKey, &buf, false, h)
_, err := s.hedgingConn.ObjectGet(ctx, s.cfg.Config.ContainerName, objectKey, &buf, false, h)
if err != nil {
return nil, err
}
@@ -159,13 +159,13 @@ func (s *SwiftObjectClient) GetObjectRange(_ context.Context, objectKey string,
}
// PutObject puts the specified bytes into the configured Swift container at the provided key
func (s *SwiftObjectClient) PutObject(_ context.Context, objectKey string, object io.Reader) error {
_, err := s.conn.ObjectPut(s.cfg.ContainerName, objectKey, object, false, "", "", nil)
func (s *SwiftObjectClient) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
_, err := s.conn.ObjectPut(ctx, s.cfg.Config.ContainerName, objectKey, object, false, "", "", nil)
return err
}
// List only objects from the store non-recursively
func (s *SwiftObjectClient) List(_ context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
func (s *SwiftObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
if len(delimiter) > 1 {
return nil, nil, fmt.Errorf("delimiter must be a single character but was %s", delimiter)
}
@@ -177,7 +177,7 @@ func (s *SwiftObjectClient) List(_ context.Context, prefix, delimiter string) ([
opts.Delimiter = []rune(delimiter)[0]
}
objs, err := s.conn.ObjectsAll(s.cfg.ContainerName, opts)
objs, err := s.conn.ObjectsAll(ctx, s.cfg.Config.ContainerName, opts)
if err != nil {
return nil, nil, err
}
@@ -203,8 +203,8 @@ func (s *SwiftObjectClient) List(_ context.Context, prefix, delimiter string) ([
}
// DeleteObject deletes the specified object key from the configured Swift container.
func (s *SwiftObjectClient) DeleteObject(_ context.Context, objectKey string) error {
return s.conn.ObjectDelete(s.cfg.ContainerName, objectKey)
func (s *SwiftObjectClient) DeleteObject(ctx context.Context, objectKey string) error {
return s.conn.ObjectDelete(ctx, s.cfg.Config.ContainerName, objectKey)
}
// IsObjectNotFoundErr returns true if error means that object is not found. Relevant to GetObject and DeleteObject operations.

@@ -0,0 +1,4 @@
*~
*.pyc
test-env*
junk/

@@ -0,0 +1,56 @@
# golangci-lint configuration options
linters:
enable:
- errcheck
- goimports
- revive
- ineffassign
- govet
- unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
#- prealloc
#- maligned
disable-all: true
issues:
# Enable some lints excluded by default
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
exclude-rules:
- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
linters-settings:
revive:
rules:
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: empty-block
disabled: true
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]

@@ -0,0 +1,20 @@
Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@@ -0,0 +1,172 @@
Swift
=====
This package provides an easy-to-use library for interfacing with Swift / Openstack Object Storage / Rackspace Cloud
Files from the Go language.
[![Build Status](https://github.com/ncw/swift/workflows/build/badge.svg?branch=master)](https://github.com/ncw/swift/actions)
[![Go Reference](https://pkg.go.dev/badge/github.com/ncw/swift/v2.svg)](https://pkg.go.dev/github.com/ncw/swift/v2)
Install
-------
Use go to install the library
go get github.com/ncw/swift/v2
Usage
-----
See here for full package docs
- https://pkg.go.dev/github.com/ncw/swift/v2
Here is a short example from the docs
```go
import "github.com/ncw/swift/v2"
// Create a connection
c := swift.Connection{
UserName: "user",
ApiKey: "key",
AuthUrl: "auth_url",
Domain: "domain", // Name of the domain (v3 auth only)
Tenant: "tenant", // Name of the tenant (v2 auth only)
}
ctx := context.Background()
// Authenticate
err := c.Authenticate(ctx)
if err != nil {
panic(err)
}
// List all the containers
containers, err := c.ContainerNames(ctx, nil)
fmt.Println(containers)
// etc...
```
Migrating from `v1`
-----
The current major version of the library is v2. If you want to migrate from the first version of the
library, `github.com/ncw/swift`, you have to explicitly add the `/v2` suffix to your imports.
Most of the exported functions gained a new `context.Context` parameter in v2, which you will have to provide
when migrating.
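For illustration only, here is a minimal before/after sketch of that change; the container and object names are placeholders, and the v1 forms are shown in comments.

```go
package example

import (
	"bytes"
	"context"

	swift "github.com/ncw/swift/v2"
)

// migrateExample shows the v1 calls (in comments) next to their v2
// equivalents, which take a context.Context as the first argument.
func migrateExample(c *swift.Connection) error {
	ctx := context.Background()

	// v1: err := c.Authenticate()
	if err := c.Authenticate(ctx); err != nil {
		return err
	}

	// v1: _, err := c.ObjectGet("container", "key", &buf, false, nil)
	var buf bytes.Buffer
	_, err := c.ObjectGet(ctx, "container", "key", &buf, false, nil)
	return err
}
```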
Additions
---------
The `rs` sub project contains a wrapper for the Rackspace specific CDN Management interface.
Testing
-------
To run the tests you can either use an embedded fake Swift server, or use a real Openstack Swift server or a
Rackspace Cloud Files account.
When using a real Swift server, you need to set these environment variables before running the tests
export SWIFT_API_USER='user'
export SWIFT_API_KEY='key'
export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0'
And optionally these if using v2 authentication
export SWIFT_TENANT='TenantName'
export SWIFT_TENANT_ID='TenantId'
And optionally these if using v3 authentication
export SWIFT_TENANT='TenantName'
export SWIFT_TENANT_ID='TenantId'
export SWIFT_API_DOMAIN_ID='domain id'
export SWIFT_API_DOMAIN='domain name'
And optionally these if using v3 trust
export SWIFT_TRUST_ID='TrustId'
And optionally this if you want to skip server certificate validation
export SWIFT_AUTH_INSECURE=1
And optionally this to configure the connect channel timeout, in seconds
export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60
And optionally this to configure the data channel timeout, in seconds
export SWIFT_DATA_CHANNEL_TIMEOUT=60
Then run the tests with `go test`
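As a rough sketch only (not part of the upstream test suite), a helper along these lines could map those variables onto a `Connection` before exercising the API; the function name and error message are made up.

```go
package example

import (
	"context"
	"fmt"
	"os"

	swift "github.com/ncw/swift/v2"
)

// connectionFromEnv builds and authenticates a Connection from the
// environment variables described above. Illustrative only.
func connectionFromEnv(ctx context.Context) (*swift.Connection, error) {
	if os.Getenv("SWIFT_AUTH_URL") == "" {
		return nil, fmt.Errorf("SWIFT_AUTH_URL is not set")
	}
	c := &swift.Connection{
		UserName: os.Getenv("SWIFT_API_USER"),
		ApiKey:   os.Getenv("SWIFT_API_KEY"),
		AuthUrl:  os.Getenv("SWIFT_AUTH_URL"),
		Tenant:   os.Getenv("SWIFT_TENANT"),    // v2/v3 auth only
		TenantId: os.Getenv("SWIFT_TENANT_ID"), // v2/v3 auth only
	}
	if err := c.Authenticate(ctx); err != nil {
		return nil, err
	}
	return c, nil
}
```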
License
-------
This is free software under the terms of the MIT license (see the COPYING file included in this package).
Contact and support
-------------------
The project website is at:
- https://github.com/ncw/swift
There you can file bug reports, ask for help or contribute patches.
Authors
-------
- Nick Craig-Wood <nick@craig-wood.com>
Contributors
------------
- Brian "bojo" Jones <mojobojo@gmail.com>
- Janika Liiv <janika@toggl.com>
- Yamamoto, Hirotaka <ymmt2005@gmail.com>
- Stephen <yo@groks.org>
- platformpurple <stephen@platformpurple.com>
- Paul Querna <pquerna@apache.org>
- Livio Soares <liviobs@gmail.com>
- thesyncim <thesyncim@gmail.com>
- lsowen <lsowen@s1network.com> <logan@s1network.com>
- Sylvain Baubeau <sbaubeau@redhat.com>
- Chris Kastorff <encryptio@gmail.com>
- Dai HaoJun <haojun.dai@hp.com>
- Hua Wang <wanghua.humble@gmail.com>
- Fabian Ruff <fabian@progra.de> <fabian.ruff@sap.com>
- Arturo Reuschenbach Puncernau <reuschenbach@gmail.com>
- Petr Kotek <petr.kotek@bigcommerce.com>
- Stefan Majewsky <stefan.majewsky@sap.com> <majewsky@gmx.net>
- Cezar Sa Espinola <cezarsa@gmail.com>
- Sam Gunaratne <samgzeit@gmail.com>
- Richard Scothern <richard.scothern@gmail.com>
- Michel Couillard <!--<couillard.michel@voxlog.ca>--> <michel.couillard@gmail.com>
- Christopher Waldon <ckwaldon@us.ibm.com>
- dennis <dai.haojun@gmail.com>
- hag <hannes.georg@xing.com>
- Alexander Neumann <alexander@bumpern.de>
- eclipseo <30413512+eclipseo@users.noreply.github.com>
- Yuri Per <yuri@acronis.com>
- Falk Reimann <falk.reimann@sap.com>
- Arthur Paim Arnold <arthurpaimarnold@gmail.com>
- Bruno Michel <bmichel@menfin.info>
- Charles Hsu <charles0126@gmail.com>
- Omar Ali <omarali@users.noreply.github.com>
- Andreas Andersen <andreas@softwaredesign.se>
- kayrus <kay.diam@gmail.com>
- CodeLingo Bot <bot@codelingo.io>
- Jérémy Clerc <jeremy.clerc@tagpay.fr>
- 4xicom <37339705+4xicom@users.noreply.github.com>
- Bo <bo@4xi.com>
- Thiago da Silva <thiagodasilva@users.noreply.github.com>
- Brandon WELSCH <dev@brandon-welsch.eu>
- Damien Tournoud <damien@platform.sh>
- Pedro Kiefer <pedro@kiefer.com.br>
- Martin Chodur <m.chodur@seznam.cz>
- Devendra <devendranath.thadi3@gmail.com>
- timss <timsateroy@gmail.com>
- Jos Houtman <jos@houtman.it>
- Paul Collins <paul.collins@canonical.com>
- Joe Cai <joe.cai@bigcommerce.com>

@@ -0,0 +1,336 @@
package swift
import (
"bytes"
"context"
"encoding/json"
"net/http"
"net/url"
"strings"
"time"
)
// Auth defines the operations needed to authenticate with swift
//
// This encapsulates the different authentication schemes in use
type Authenticator interface {
// Request creates an http.Request for the auth - return nil if not needed
Request(context.Context, *Connection) (*http.Request, error)
// Response parses the http.Response
Response(ctx context.Context, resp *http.Response) error
// The public storage URL - set Internal to true to read
// internal/service net URL
StorageUrl(Internal bool) string
// The access token
Token() string
// The CDN url if available
CdnUrl() string
}
// Expireser is an optional interface to read the expiration time of the token
type Expireser interface {
Expires() time.Time
}
type CustomEndpointAuthenticator interface {
StorageUrlForEndpoint(endpointType EndpointType) string
}
type EndpointType string
const (
// Use public URL as storage URL
EndpointTypePublic = EndpointType("public")
// Use internal URL as storage URL
EndpointTypeInternal = EndpointType("internal")
// Use admin URL as storage URL
EndpointTypeAdmin = EndpointType("admin")
)
// newAuth - create a new Authenticator from the AuthUrl
//
// A hint for AuthVersion can be provided
func newAuth(c *Connection) (Authenticator, error) {
AuthVersion := c.AuthVersion
if AuthVersion == 0 {
if strings.Contains(c.AuthUrl, "v3") {
AuthVersion = 3
} else if strings.Contains(c.AuthUrl, "v2") {
AuthVersion = 2
} else if strings.Contains(c.AuthUrl, "v1") {
AuthVersion = 1
} else {
return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly")
}
}
switch AuthVersion {
case 1:
return &v1Auth{}, nil
case 2:
return &v2Auth{
// Guess as to whether we are using an API key or
// a password - it will try both eventually so
// this is just an optimization.
useApiKey: len(c.ApiKey) >= 32,
}, nil
case 3:
return &v3Auth{}, nil
}
return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion)
}
// ------------------------------------------------------------
// v1 auth
type v1Auth struct {
Headers http.Header // V1 auth: the authentication headers so extensions can access them
}
// v1 Authentication - make request
func (auth *v1Auth) Request(ctx context.Context, c *Connection) (*http.Request, error) {
req, err := http.NewRequestWithContext(ctx, "GET", c.AuthUrl, nil)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", c.UserAgent)
req.Header.Set("X-Auth-Key", c.ApiKey)
req.Header.Set("X-Auth-User", c.UserName)
return req, nil
}
// v1 Authentication - read response
func (auth *v1Auth) Response(_ context.Context, resp *http.Response) error {
auth.Headers = resp.Header
return nil
}
// v1 Authentication - read storage url
func (auth *v1Auth) StorageUrl(Internal bool) string {
storageUrl := auth.Headers.Get("X-Storage-Url")
if Internal {
newUrl, err := url.Parse(storageUrl)
if err != nil {
return storageUrl
}
newUrl.Host = "snet-" + newUrl.Host
storageUrl = newUrl.String()
}
return storageUrl
}
// v1 Authentication - read auth token
func (auth *v1Auth) Token() string {
return auth.Headers.Get("X-Auth-Token")
}
// v1 Authentication - read cdn url
func (auth *v1Auth) CdnUrl() string {
return auth.Headers.Get("X-CDN-Management-Url")
}
// ------------------------------------------------------------
// v2 Authentication
type v2Auth struct {
Auth *v2AuthResponse
Region string
useApiKey bool // if set will use API key not Password
useApiKeyOk bool // if set won't change useApiKey any more
notFirst bool // set after first run
}
// v2 Authentication - make request
func (auth *v2Auth) Request(ctx context.Context, c *Connection) (*http.Request, error) {
auth.Region = c.Region
// Toggle useApiKey if not first run and not OK yet
if auth.notFirst && !auth.useApiKeyOk {
auth.useApiKey = !auth.useApiKey
}
auth.notFirst = true
// Create a V2 auth request for the body of the connection
var v2i interface{}
if !auth.useApiKey {
// Normal swift authentication
v2 := v2AuthRequest{}
v2.Auth.PasswordCredentials.UserName = c.UserName
v2.Auth.PasswordCredentials.Password = c.ApiKey
v2.Auth.Tenant = c.Tenant
v2.Auth.TenantId = c.TenantId
v2i = v2
} else {
// Rackspace special with API Key
v2 := v2AuthRequestRackspace{}
v2.Auth.ApiKeyCredentials.UserName = c.UserName
v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey
v2.Auth.Tenant = c.Tenant
v2.Auth.TenantId = c.TenantId
v2i = v2
}
body, err := json.Marshal(v2i)
if err != nil {
return nil, err
}
url := c.AuthUrl
if !strings.HasSuffix(url, "/") {
url += "/"
}
url += "tokens"
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", c.UserAgent)
return req, nil
}
// v2 Authentication - read response
func (auth *v2Auth) Response(_ context.Context, resp *http.Response) error {
auth.Auth = new(v2AuthResponse)
err := readJson(resp, auth.Auth)
// If successfully read Auth then no need to toggle useApiKey any more
if err == nil {
auth.useApiKeyOk = true
}
return err
}
// Finds the Endpoint Url of "type" from the v2AuthResponse using the
// Region if set or defaulting to the first one if not
//
// Returns "" if not found
func (auth *v2Auth) endpointUrl(Type string, endpointType EndpointType) string {
for _, catalog := range auth.Auth.Access.ServiceCatalog {
if catalog.Type == Type {
for _, endpoint := range catalog.Endpoints {
if auth.Region == "" || (auth.Region == endpoint.Region) {
switch endpointType {
case EndpointTypeInternal:
return endpoint.InternalUrl
case EndpointTypePublic:
return endpoint.PublicUrl
case EndpointTypeAdmin:
return endpoint.AdminUrl
default:
return ""
}
}
}
}
}
return ""
}
// v2 Authentication - read storage url
//
// If Internal is true then it reads the private (internal / service
// net) URL.
func (auth *v2Auth) StorageUrl(Internal bool) string {
endpointType := EndpointTypePublic
if Internal {
endpointType = EndpointTypeInternal
}
return auth.StorageUrlForEndpoint(endpointType)
}
// v2 Authentication - read storage url
//
// Use the indicated endpointType to choose a URL.
func (auth *v2Auth) StorageUrlForEndpoint(endpointType EndpointType) string {
return auth.endpointUrl("object-store", endpointType)
}
// v2 Authentication - read auth token
func (auth *v2Auth) Token() string {
return auth.Auth.Access.Token.Id
}
// v2 Authentication - read expires
func (auth *v2Auth) Expires() time.Time {
t, err := time.Parse(time.RFC3339, auth.Auth.Access.Token.Expires)
if err != nil {
return time.Time{} // return Zero if not parsed
}
return t
}
// v2 Authentication - read cdn url
func (auth *v2Auth) CdnUrl() string {
return auth.endpointUrl("rax:object-cdn", EndpointTypePublic)
}
// ------------------------------------------------------------
// V2 Authentication request
//
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
type v2AuthRequest struct {
Auth struct {
PasswordCredentials struct {
UserName string `json:"username"`
Password string `json:"password"`
} `json:"passwordCredentials"`
Tenant string `json:"tenantName,omitempty"`
TenantId string `json:"tenantId,omitempty"`
} `json:"auth"`
}
// V2 Authentication request - Rackspace variant
//
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
type v2AuthRequestRackspace struct {
Auth struct {
ApiKeyCredentials struct {
UserName string `json:"username"`
ApiKey string `json:"apiKey"`
} `json:"RAX-KSKEY:apiKeyCredentials"`
Tenant string `json:"tenantName,omitempty"`
TenantId string `json:"tenantId,omitempty"`
} `json:"auth"`
}
// V2 Authentication reply
//
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
type v2AuthResponse struct {
Access struct {
ServiceCatalog []struct {
Endpoints []struct {
InternalUrl string
PublicUrl string
AdminUrl string
Region string
TenantId string
}
Name string
Type string
}
Token struct {
Expires string
Id string
Tenant struct {
Id string
Name string
}
}
User struct {
DefaultRegion string `json:"RAX-AUTH:defaultRegion"`
Id string
Name string
Roles []struct {
Description string
Id string
Name string
TenantId string
}
}
}
}
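Editorial note, not upstream documentation: `newAuth` above guesses the auth version from the substrings `v1`/`v2`/`v3` in `AuthUrl` and otherwise returns the "set explicitly" error, so a caller with a version-less endpoint sets `Connection.AuthVersion` directly. A minimal sketch; the Keystone URL is a placeholder.

```go
package example

import (
	"context"

	swift "github.com/ncw/swift/v2"
)

// newV3Connection sets AuthVersion explicitly because the (made-up) endpoint
// carries no "v1"/"v2"/"v3" hint for newAuth to detect.
func newV3Connection(ctx context.Context) (*swift.Connection, error) {
	c := &swift.Connection{
		UserName:    "user",
		ApiKey:      "key",
		AuthUrl:     "https://keystone.example.com/identity",
		AuthVersion: 3,         // skip the URL-based guess in newAuth
		Domain:      "Default", // v3 auth scopes the user to a domain
	}
	if err := c.Authenticate(ctx); err != nil {
		return nil, err
	}
	return c, nil
}
```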

@@ -0,0 +1,300 @@
package swift
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
)
const (
v3AuthMethodToken = "token"
v3AuthMethodPassword = "password"
v3AuthMethodApplicationCredential = "application_credential"
)
// V3 Authentication request
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
// http://developer.openstack.org/api-ref-identity-v3.html
type v3AuthRequest struct {
Auth struct {
Identity struct {
Methods []string `json:"methods"`
Password *v3AuthPassword `json:"password,omitempty"`
Token *v3AuthToken `json:"token,omitempty"`
ApplicationCredential *v3AuthApplicationCredential `json:"application_credential,omitempty"`
} `json:"identity"`
Scope *v3Scope `json:"scope,omitempty"`
} `json:"auth"`
}
type v3Scope struct {
Project *v3Project `json:"project,omitempty"`
Domain *v3Domain `json:"domain,omitempty"`
Trust *v3Trust `json:"OS-TRUST:trust,omitempty"`
}
type v3Domain struct {
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
}
type v3Project struct {
Name string `json:"name,omitempty"`
Id string `json:"id,omitempty"`
Domain *v3Domain `json:"domain,omitempty"`
}
type v3Trust struct {
Id string `json:"id"`
}
type v3User struct {
Domain *v3Domain `json:"domain,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Password string `json:"password,omitempty"`
}
type v3AuthToken struct {
Id string `json:"id"`
}
type v3AuthPassword struct {
User v3User `json:"user"`
}
type v3AuthApplicationCredential struct {
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Secret string `json:"secret,omitempty"`
User *v3User `json:"user,omitempty"`
}
// V3 Authentication response
type v3AuthResponse struct {
Token struct {
ExpiresAt string `json:"expires_at"`
IssuedAt string `json:"issued_at"`
Methods []string
Roles []struct {
Id, Name string
Links struct {
Self string
}
}
Project struct {
Domain struct {
Id, Name string
}
Id, Name string
}
Catalog []struct {
Id, Namem, Type string
Endpoints []struct {
Id, Region_Id, Url, Region string
Interface EndpointType
}
}
User struct {
Id, Name string
Domain struct {
Id, Name string
Links struct {
Self string
}
}
}
Audit_Ids []string
}
}
type v3Auth struct {
Region string
Auth *v3AuthResponse
Headers http.Header
}
func (auth *v3Auth) Request(ctx context.Context, c *Connection) (*http.Request, error) {
auth.Region = c.Region
var v3i interface{}
v3 := v3AuthRequest{}
if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret != "" {
var user *v3User
if c.ApplicationCredentialId != "" {
c.ApplicationCredentialName = ""
user = &v3User{}
}
if user == nil && c.UserId != "" {
// UserID could be used without the domain information
user = &v3User{
Id: c.UserId,
}
}
if user == nil && c.UserName == "" {
// Make sure that Username or UserID are provided
return nil, fmt.Errorf("UserID or Name should be provided")
}
if user == nil && c.DomainId != "" {
user = &v3User{
Name: c.UserName,
Domain: &v3Domain{
Id: c.DomainId,
},
}
}
if user == nil && c.Domain != "" {
user = &v3User{
Name: c.UserName,
Domain: &v3Domain{
Name: c.Domain,
},
}
}
// Make sure that DomainId or Domain is provided along with the UserName
if user == nil {
return nil, fmt.Errorf("DomainID or Domain should be provided")
}
v3.Auth.Identity.Methods = []string{v3AuthMethodApplicationCredential}
v3.Auth.Identity.ApplicationCredential = &v3AuthApplicationCredential{
Id: c.ApplicationCredentialId,
Name: c.ApplicationCredentialName,
Secret: c.ApplicationCredentialSecret,
User: user,
}
} else if c.UserName == "" && c.UserId == "" {
v3.Auth.Identity.Methods = []string{v3AuthMethodToken}
v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey}
} else {
v3.Auth.Identity.Methods = []string{v3AuthMethodPassword}
v3.Auth.Identity.Password = &v3AuthPassword{
User: v3User{
Name: c.UserName,
Id: c.UserId,
Password: c.ApiKey,
},
}
var domain *v3Domain
if c.Domain != "" {
domain = &v3Domain{Name: c.Domain}
} else if c.DomainId != "" {
domain = &v3Domain{Id: c.DomainId}
}
v3.Auth.Identity.Password.User.Domain = domain
}
if v3.Auth.Identity.Methods[0] != v3AuthMethodApplicationCredential {
if c.TrustId != "" {
v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}}
} else if c.TenantId != "" || c.Tenant != "" {
v3.Auth.Scope = &v3Scope{Project: &v3Project{}}
if c.TenantId != "" {
v3.Auth.Scope.Project.Id = c.TenantId
} else if c.Tenant != "" {
v3.Auth.Scope.Project.Name = c.Tenant
switch {
case c.TenantDomain != "":
v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain}
case c.TenantDomainId != "":
v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId}
case c.Domain != "":
v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain}
case c.DomainId != "":
v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId}
default:
v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"}
}
}
}
}
v3i = v3
body, err := json.Marshal(v3i)
if err != nil {
return nil, err
}
url := c.AuthUrl
if !strings.HasSuffix(url, "/") {
url += "/"
}
url += "auth/tokens"
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", c.UserAgent)
return req, nil
}
func (auth *v3Auth) Response(_ context.Context, resp *http.Response) error {
auth.Auth = &v3AuthResponse{}
auth.Headers = resp.Header
err := readJson(resp, auth.Auth)
return err
}
func (auth *v3Auth) endpointUrl(Type string, endpointType EndpointType) string {
for _, catalog := range auth.Auth.Token.Catalog {
if catalog.Type == Type {
for _, endpoint := range catalog.Endpoints {
if endpoint.Interface == endpointType && (auth.Region == "" || (auth.Region == endpoint.Region)) {
return endpoint.Url
}
}
}
}
return ""
}
func (auth *v3Auth) StorageUrl(Internal bool) string {
endpointType := EndpointTypePublic
if Internal {
endpointType = EndpointTypeInternal
}
return auth.StorageUrlForEndpoint(endpointType)
}
func (auth *v3Auth) StorageUrlForEndpoint(endpointType EndpointType) string {
return auth.endpointUrl("object-store", endpointType)
}
func (auth *v3Auth) Token() string {
return auth.Headers.Get("X-Subject-Token")
}
func (auth *v3Auth) Expires() time.Time {
t, err := time.Parse(time.RFC3339, auth.Auth.Token.ExpiresAt)
if err != nil {
return time.Time{} // return Zero if not parsed
}
return t
}
func (auth *v3Auth) CdnUrl() string {
return ""
}

@@ -0,0 +1,29 @@
// Go 1.0 compatibility functions
//go:build !go1.1
// +build !go1.1
package swift
import (
"log"
"net/http"
"time"
)
// Cancel the request - doesn't work under < go 1.1
func cancelRequest(transport http.RoundTripper, req *http.Request) {
log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1")
}
// Reset a timer - Doesn't work properly < go 1.1
//
// This is quite hard to do properly under go < 1.1 so we do a crude
// approximation and hope that everyone upgrades to go 1.1 quickly
func resetTimer(t *time.Timer, d time.Duration) {
t.Stop()
// Very likely this doesn't actually work if we are already
// selecting on t.C. However we've stopped the original timer
// so won't break transfers but may not time them out :-(
*t = *time.NewTimer(d)
}

@@ -0,0 +1,25 @@
// Go 1.1 and later compatibility functions
//
//go:build go1.1
// +build go1.1
package swift
import (
"net/http"
"time"
)
// Cancel the request
func cancelRequest(transport http.RoundTripper, req *http.Request) {
if tr, ok := transport.(interface {
CancelRequest(*http.Request)
}); ok {
tr.CancelRequest(req)
}
}
// Reset a timer
func resetTimer(t *time.Timer, d time.Duration) {
t.Reset(d)
}

@@ -0,0 +1,24 @@
//go:build go1.6
// +build go1.6
package swift
import (
"net/http"
"time"
)
const IS_AT_LEAST_GO_16 = true
func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {
tr.ExpectContinueTimeout = t
}
func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {
if req.Body != nil {
req.Header.Add("Expect", "100-continue")
}
if !hasContentLength {
req.TransferEncoding = []string{"chunked"}
}
}

@@ -0,0 +1,14 @@
//go:build !go1.6
// +build !go1.6
package swift
import (
"net/http"
"time"
)
const IS_AT_LEAST_GO_16 = false
func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {}
func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {}

@@ -0,0 +1,158 @@
package swift
import (
"context"
"os"
"strings"
)
// DynamicLargeObjectCreateFile represents an open dynamic large object
type DynamicLargeObjectCreateFile struct {
largeObjectCreateFile
}
// DynamicLargeObjectCreateFile creates a dynamic large object
// returning an object which satisfies io.Writer, io.Seeker, io.Closer
// and io.ReaderFrom. The flags are as passed to the
// largeObjectCreate method.
func (c *Connection) DynamicLargeObjectCreateFile(ctx context.Context, opts *LargeObjectOpts) (LargeObjectFile, error) {
lo, err := c.largeObjectCreate(ctx, opts)
if err != nil {
return nil, err
}
return withBuffer(opts, &DynamicLargeObjectCreateFile{
largeObjectCreateFile: *lo,
}), nil
}
// DynamicLargeObjectCreate creates or truncates an existing dynamic
// large object returning a writeable object. This sets opts.Flags to
// an appropriate value before calling DynamicLargeObjectCreateFile
func (c *Connection) DynamicLargeObjectCreate(ctx context.Context, opts *LargeObjectOpts) (LargeObjectFile, error) {
opts.Flags = os.O_TRUNC | os.O_CREATE
return c.DynamicLargeObjectCreateFile(ctx, opts)
}
// DynamicLargeObjectDelete deletes a dynamic large object and all of its segments.
func (c *Connection) DynamicLargeObjectDelete(ctx context.Context, container string, path string) error {
return c.LargeObjectDelete(ctx, container, path)
}
// DynamicLargeObjectMove moves a dynamic large object from srcContainer, srcObjectName to dstContainer, dstObjectName
func (c *Connection) DynamicLargeObjectMove(ctx context.Context, srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
info, headers, err := c.Object(ctx, srcContainer, srcObjectName)
if err != nil {
return err
}
segmentContainer, segmentPath, err := parseFullPath(headers["X-Object-Manifest"])
if err != nil {
return err
}
if err := c.createDLOManifest(ctx, dstContainer, dstObjectName, segmentContainer+"/"+segmentPath, info.ContentType, sanitizeLargeObjectMoveHeaders(headers)); err != nil {
return err
}
if err := c.ObjectDelete(ctx, srcContainer, srcObjectName); err != nil {
return err
}
return nil
}
func sanitizeLargeObjectMoveHeaders(headers Headers) Headers {
sanitizedHeaders := make(map[string]string, len(headers))
for k, v := range headers {
if strings.HasPrefix(k, "X-") { // Some of the fields do not affect the request, e.g. X-Timestamp, X-Trans-Id, X-Openstack-Request-Id; OpenStack will generate new ones anyway.
sanitizedHeaders[k] = v
}
}
return sanitizedHeaders
}
// createDLOManifest creates a dynamic large object manifest
func (c *Connection) createDLOManifest(ctx context.Context, container string, objectName string, prefix string, contentType string, headers Headers) error {
if headers == nil {
headers = make(Headers)
}
headers["X-Object-Manifest"] = prefix
manifest, err := c.ObjectCreate(ctx, container, objectName, false, "", contentType, headers)
if err != nil {
return err
}
if err := manifest.Close(); err != nil {
return err
}
return nil
}
// Close satisfies the io.Closer interface
func (file *DynamicLargeObjectCreateFile) Close() error {
return file.CloseWithContext(context.Background())
}
func (file *DynamicLargeObjectCreateFile) CloseWithContext(ctx context.Context) error {
return file.Flush(ctx)
}
func (file *DynamicLargeObjectCreateFile) Flush(ctx context.Context) error {
err := file.conn.createDLOManifest(ctx, file.container, file.objectName, file.segmentContainer+"/"+file.prefix, file.contentType, file.headers)
if err != nil {
return err
}
return file.conn.waitForSegmentsToShowUp(ctx, file.container, file.objectName, file.Size())
}
func (c *Connection) getAllDLOSegments(ctx context.Context, segmentContainer, segmentPath string) ([]Object, error) {
//a simple container listing works 99.9% of the time
segments, err := c.ObjectsAll(ctx, segmentContainer, &ObjectsOpts{Prefix: segmentPath})
if err != nil {
return nil, err
}
hasObjectName := make(map[string]struct{})
for _, segment := range segments {
hasObjectName[segment.Name] = struct{}{}
}
//The container listing might be outdated (i.e. not contain all existing
//segment objects yet) because of temporary inconsistency (Swift is only
//eventually consistent!). Check its completeness.
segmentNumber := 0
for {
segmentNumber++
segmentName := getSegment(segmentPath, segmentNumber)
if _, seen := hasObjectName[segmentName]; seen {
continue
}
//This segment is missing in the container listing. Use a more reliable
//request to check its existence. (HEAD requests on segments are
//guaranteed to return the correct metadata, except for the pathological
//case of an outage of large parts of the Swift cluster or its network,
//since every segment is only written once.)
segment, _, err := c.Object(ctx, segmentContainer, segmentName)
switch err {
case nil:
//found new segment -> add it in the correct position and keep
//going, more might be missing
if segmentNumber <= len(segments) {
segments = append(segments[:segmentNumber], segments[segmentNumber-1:]...)
segments[segmentNumber-1] = segment
} else {
segments = append(segments, segment)
}
continue
case ObjectNotFound:
//This segment is missing. Since we upload segments sequentially,
//there won't be any more segments after it.
return segments, nil
default:
return nil, err //unexpected error
}
}
}
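Editorial sketch of how the DLO helpers above are typically driven; the container name, object name and chunk size are placeholders, and `LargeObjectOpts`/`LargeObjectFile` are defined in largeobjects.go, included later in this change.

```go
package example

import (
	"context"

	swift "github.com/ncw/swift/v2"
)

// uploadDLO writes data as a dynamic large object. Closing the returned
// LargeObjectFile flushes buffered segments and writes the DLO manifest.
func uploadDLO(ctx context.Context, c *swift.Connection, data []byte) error {
	out, err := c.DynamicLargeObjectCreate(ctx, &swift.LargeObjectOpts{
		Container:  "videos",
		ObjectName: "movie.mp4",
		ChunkSize:  10 * 1024 * 1024, // the default when left at zero
	})
	if err != nil {
		return err
	}
	if _, err := out.WriteWithContext(ctx, data); err != nil {
		_ = out.CloseWithContext(ctx)
		return err
	}
	return out.CloseWithContext(ctx)
}
```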

@@ -0,0 +1,18 @@
/*
Package swift provides an easy to use interface to Swift / Openstack Object Storage / Rackspace Cloud Files
# Standard Usage
Most of the work is done through the Container*() and Object*() methods.
All methods are safe to use concurrently in multiple go routines.
# Object Versioning
As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. The container and objects inside it can be used in the standard manner; however, pushing a file multiple times will result in it being copied to the version container and the new file put in its place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system.
# Rackspace Sub Module
This module specifically allows the enabling/disabling of Rackspace Cloud File CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack, therefore it has been placed in a submodule. One can easily create a RsConnection and use it like the standard Connection to access and manipulate containers and objects.
*/
package swift
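Editorial sketch of the container layout described above: a "current" container whose superseded objects are copied into a "versions" container. `ContainerCreate` appears elsewhere in this change; the `X-Versions-Location` header comes from the linked OpenStack object-versioning API, and the container names are placeholders.

```go
package example

import (
	"context"

	swift "github.com/ncw/swift/v2"
)

// enableVersioning creates the version container and points the current
// container at it via the X-Versions-Location header.
func enableVersioning(ctx context.Context, c *swift.Connection) error {
	// Container that will receive non-current copies of overwritten objects.
	if err := c.ContainerCreate(ctx, "versions", nil); err != nil {
		return err
	}
	// Container holding the latest version of each object.
	return c.ContainerCreate(ctx, "current", swift.Headers{
		"X-Versions-Location": "versions",
	})
}
```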

@@ -0,0 +1,57 @@
#!/bin/bash
# Run the swift tests against an openstack server from a swift all in
# one docker image
set -e
NAME=swift-aio
HOST=127.0.0.1
PORT=8294
AUTH=v1
case $AUTH in
v1)
export SWIFT_AUTH_URL="http://${HOST}:${PORT}/auth/v1.0"
export SWIFT_API_USER='test:tester'
export SWIFT_API_KEY='testing'
;;
v2)
# NB v2 auth doesn't work for unknown reasons!
export SWIFT_AUTH_URL="http://${HOST}:${PORT}/auth/v2.0"
export SWIFT_TENANT='tester'
export SWIFT_API_USER='test'
export SWIFT_API_KEY='testing'
;;
*)
echo "Bad AUTH %AUTH"
exit 1
;;
esac
echo "Starting test server"
docker run --rm -d --name ${NAME} -p ${HOST}:${PORT}:8080 bouncestorage/swift-aio
function cleanup {
echo "Killing test server"
docker kill ${NAME}
}
trap cleanup EXIT
echo -n "Waiting for test server to startup"
tries=30
while [[ $tries -gt 0 ]]; do
echo -n "."
STATUS_RECEIVED=$(curl -s -o /dev/null -L -w ''%{http_code}'' ${SWIFT_AUTH_URL} || true)
if [[ ${STATUS_RECEIVED} -ge 200 ]]; then
break
fi
let tries-=1
sleep 1
done
echo "OK"
echo "Running tests"
go test -v

@@ -0,0 +1,487 @@
package swift
import (
"bufio"
"bytes"
"context"
"crypto/rand"
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"io"
"net/url"
"os"
gopath "path"
"strconv"
"strings"
"time"
)
// NotLargeObject is returned if an operation is performed on an object which isn't large.
//
//nolint:stylecheck
var NotLargeObject = errors.New("not a large object")
// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded
var readAfterWriteTimeout = 15 * time.Second
// readAfterWriteWait defines the time to sleep between two retries
var readAfterWriteWait = 200 * time.Millisecond
// largeObjectCreateFile represents an open static or dynamic large object
type largeObjectCreateFile struct {
conn *Connection
container string
objectName string
currentLength int64
filePos int64
chunkSize int64
segmentContainer string
prefix string
contentType string
checkHash bool
segments []Object
headers Headers
minChunkSize int64
}
func swiftSegmentPath(path string) (string, error) {
checksum := sha1.New()
random := make([]byte, 32)
if _, err := rand.Read(random); err != nil {
return "", err
}
path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...)))
return strings.TrimLeft(strings.TrimRight("segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil
}
func getSegment(segmentPath string, partNumber int) string {
return fmt.Sprintf("%s/%016d", segmentPath, partNumber)
}
func parseFullPath(manifest string) (container string, prefix string, err error) {
manifest, err = url.PathUnescape(manifest)
if err != nil {
return
}
components := strings.SplitN(manifest, "/", 2)
container = components[0]
if len(components) > 1 {
prefix = components[1]
}
return container, prefix, nil
}
func (headers Headers) IsLargeObjectDLO() bool {
_, isDLO := headers["X-Object-Manifest"]
return isDLO
}
func (headers Headers) IsLargeObjectSLO() bool {
_, isSLO := headers["X-Static-Large-Object"]
return isSLO
}
func (headers Headers) IsLargeObject() bool {
return headers.IsLargeObjectSLO() || headers.IsLargeObjectDLO()
}
func (c *Connection) getAllSegments(ctx context.Context, container string, path string, headers Headers) (string, []Object, error) {
if manifest, isDLO := headers["X-Object-Manifest"]; isDLO {
segmentContainer, segmentPath, err := parseFullPath(manifest)
if err != nil {
return segmentContainer, nil, err
}
segments, err := c.getAllDLOSegments(ctx, segmentContainer, segmentPath)
return segmentContainer, segments, err
}
if headers.IsLargeObjectSLO() {
return c.getAllSLOSegments(ctx, container, path)
}
return "", nil, NotLargeObject
}
// LargeObjectOpts describes how a large object should be created
type LargeObjectOpts struct {
Container string // Name of container to place object
ObjectName string // Name of object
Flags int // Creation flags
CheckHash bool // If set Check the hash
Hash string // If set use this hash to check
ContentType string // Content-Type of the object
Headers Headers // Additional headers to upload the object with
ChunkSize int64 // Size of chunks of the object, defaults to 10MB if not set
MinChunkSize int64 // Minimum chunk size, automatically set for SLO's based on info
SegmentContainer string // Name of the container to place segments
SegmentPrefix string // Prefix to use for the segments
NoBuffer bool // Prevents using a bufio.Writer to write segments
}
type LargeObjectFile interface {
io.Seeker
io.Writer
io.Closer
WriteWithContext(ctx context.Context, p []byte) (n int, err error)
CloseWithContext(ctx context.Context) error
Size() int64
Flush(ctx context.Context) error
}
// largeObjectCreate creates a large object at opts.Container, opts.ObjectName.
//
// opts.Flags can have the following bits set
//
// os.O_TRUNC - remove the contents of the large object if it exists
// os.O_APPEND - write at the end of the large object
func (c *Connection) largeObjectCreate(ctx context.Context, opts *LargeObjectOpts) (*largeObjectCreateFile, error) {
var (
segmentPath string
segmentContainer string
segments []Object
currentLength int64
err error
)
if opts.SegmentPrefix != "" {
segmentPath = opts.SegmentPrefix
} else if segmentPath, err = swiftSegmentPath(opts.ObjectName); err != nil {
return nil, err
}
if info, headers, err := c.Object(ctx, opts.Container, opts.ObjectName); err == nil {
if opts.Flags&os.O_TRUNC != 0 {
err := c.LargeObjectDelete(ctx, opts.Container, opts.ObjectName)
if err != nil {
return nil, err
}
} else {
currentLength = info.Bytes
if headers.IsLargeObject() {
segmentContainer, segments, err = c.getAllSegments(ctx, opts.Container, opts.ObjectName, headers)
if err != nil {
return nil, err
}
if len(segments) > 0 {
segmentPath = gopath.Dir(segments[0].Name)
}
} else {
if err = c.ObjectMove(ctx, opts.Container, opts.ObjectName, opts.Container, getSegment(segmentPath, 1)); err != nil {
return nil, err
}
segments = append(segments, info)
}
}
} else if err != ObjectNotFound {
return nil, err
}
// segmentContainer is not empty when the manifest already existed
if segmentContainer == "" {
if opts.SegmentContainer != "" {
segmentContainer = opts.SegmentContainer
} else {
segmentContainer = opts.Container + "_segments"
}
}
file := &largeObjectCreateFile{
conn: c,
checkHash: opts.CheckHash,
container: opts.Container,
objectName: opts.ObjectName,
chunkSize: opts.ChunkSize,
minChunkSize: opts.MinChunkSize,
headers: opts.Headers,
segmentContainer: segmentContainer,
prefix: segmentPath,
segments: segments,
currentLength: currentLength,
}
if file.chunkSize == 0 {
file.chunkSize = 10 * 1024 * 1024
}
if file.minChunkSize > file.chunkSize {
file.chunkSize = file.minChunkSize
}
if opts.Flags&os.O_APPEND != 0 {
file.filePos = currentLength
}
return file, nil
}
// LargeObjectDelete deletes the large object named by container, path
func (c *Connection) LargeObjectDelete(ctx context.Context, container string, objectName string) error {
_, headers, err := c.Object(ctx, container, objectName)
if err != nil {
return err
}
var objects [][]string
if headers.IsLargeObject() {
segmentContainer, segments, err := c.getAllSegments(ctx, container, objectName, headers)
if err != nil {
return err
}
for _, obj := range segments {
objects = append(objects, []string{segmentContainer, obj.Name})
}
}
objects = append(objects, []string{container, objectName})
info, err := c.cachedQueryInfo(ctx)
if err == nil && info.SupportsBulkDelete() && len(objects) > 0 {
filenames := make([]string, len(objects))
for i, obj := range objects {
filenames[i] = obj[0] + "/" + obj[1]
}
_, err = c.doBulkDelete(ctx, filenames, nil)
// Don't fail on ObjectNotFound because eventual consistency
// makes this situation normal.
if err != nil && err != Forbidden && err != ObjectNotFound {
return err
}
} else {
for _, obj := range objects {
if err := c.ObjectDelete(ctx, obj[0], obj[1]); err != nil {
return err
}
}
}
return nil
}
// LargeObjectGetSegments returns all the segments that compose an object
// If the object is a Dynamic Large Object (DLO), it just returns the objects
// that have the prefix as indicated by the manifest.
// If the object is a Static Large Object (SLO), it retrieves the JSON content
// of the manifest and return all the segments of it.
func (c *Connection) LargeObjectGetSegments(ctx context.Context, container string, path string) (string, []Object, error) {
_, headers, err := c.Object(ctx, container, path)
if err != nil {
return "", nil, err
}
return c.getAllSegments(ctx, container, path, headers)
}
// Seek sets the offset for the next write operation
func (file *largeObjectCreateFile) Seek(offset int64, whence int) (int64, error) {
switch whence {
case 0:
file.filePos = offset
case 1:
file.filePos += offset
case 2:
file.filePos = file.currentLength + offset
default:
return -1, fmt.Errorf("invalid value for whence")
}
if file.filePos < 0 {
return -1, fmt.Errorf("negative offset")
}
return file.filePos, nil
}
func (file *largeObjectCreateFile) Size() int64 {
return file.currentLength
}
func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err error) {
endTimer := time.NewTimer(readAfterWriteTimeout)
defer endTimer.Stop()
waitingTime := readAfterWriteWait
for {
var headers Headers
var sz int64
if headers, sz, err = fn(); err == nil {
if !headers.IsLargeObjectDLO() || (expectedSize == 0 && sz > 0) || expectedSize == sz {
return
}
} else {
return
}
waitTimer := time.NewTimer(waitingTime)
select {
case <-endTimer.C:
waitTimer.Stop()
err = fmt.Errorf("timeout expired while waiting for object to have size == %d, got: %d", expectedSize, sz)
return
case <-waitTimer.C:
waitingTime *= 2
}
}
}
func (c *Connection) waitForSegmentsToShowUp(ctx context.Context, container, objectName string, expectedSize int64) (err error) {
err = withLORetry(expectedSize, func() (Headers, int64, error) {
var info Object
var headers Headers
info, headers, err = c.objectBase(ctx, container, objectName)
if err != nil {
return headers, 0, err
}
return headers, info.Bytes, nil
})
return
}
func (file *largeObjectCreateFile) Write(buf []byte) (int, error) {
return file.WriteWithContext(context.Background(), buf)
}
func (file *largeObjectCreateFile) WriteWithContext(ctx context.Context, buf []byte) (int, error) {
var sz int64
var relativeFilePos int
writeSegmentIdx := 0
for i, obj := range file.segments {
if file.filePos < sz+obj.Bytes || (i == len(file.segments)-1 && file.filePos < sz+file.minChunkSize) {
relativeFilePos = int(file.filePos - sz)
break
}
writeSegmentIdx++
sz += obj.Bytes
}
sizeToWrite := len(buf)
for offset := 0; offset < sizeToWrite; {
newSegment, n, err := file.writeSegment(ctx, buf[offset:], writeSegmentIdx, relativeFilePos)
if err != nil {
return 0, err
}
if writeSegmentIdx < len(file.segments) {
file.segments[writeSegmentIdx] = *newSegment
} else {
file.segments = append(file.segments, *newSegment)
}
offset += n
writeSegmentIdx++
relativeFilePos = 0
}
file.filePos += int64(sizeToWrite)
file.currentLength = 0
for _, obj := range file.segments {
file.currentLength += obj.Bytes
}
return sizeToWrite, nil
}
func (file *largeObjectCreateFile) writeSegment(ctx context.Context, buf []byte, writeSegmentIdx int, relativeFilePos int) (obj *Object, n int, err error) {
var (
readers []io.Reader
existingSegment *Object
segmentSize int
)
segmentName := getSegment(file.prefix, writeSegmentIdx+1)
sizeToRead := int(file.chunkSize)
if writeSegmentIdx < len(file.segments) {
existingSegment = &file.segments[writeSegmentIdx]
if writeSegmentIdx != len(file.segments)-1 {
sizeToRead = int(existingSegment.Bytes)
}
if relativeFilePos > 0 {
headers := make(Headers)
headers["Range"] = "bytes=0-" + strconv.FormatInt(int64(relativeFilePos-1), 10)
existingSegmentReader, _, err := file.conn.ObjectOpen(ctx, file.segmentContainer, segmentName, true, headers)
if err != nil {
return nil, 0, err
}
defer func() {
closeErr := existingSegmentReader.Close()
if closeErr != nil {
err = closeErr
}
}()
sizeToRead -= relativeFilePos
segmentSize += relativeFilePos
readers = []io.Reader{existingSegmentReader}
}
}
if sizeToRead > len(buf) {
sizeToRead = len(buf)
}
segmentSize += sizeToRead
readers = append(readers, bytes.NewReader(buf[:sizeToRead]))
if existingSegment != nil && segmentSize < int(existingSegment.Bytes) {
headers := make(Headers)
headers["Range"] = "bytes=" + strconv.FormatInt(int64(segmentSize), 10) + "-"
tailSegmentReader, _, err := file.conn.ObjectOpen(ctx, file.segmentContainer, segmentName, true, headers)
if err != nil {
return nil, 0, err
}
defer func() {
closeErr := tailSegmentReader.Close()
if closeErr != nil {
err = closeErr
}
}()
segmentSize = int(existingSegment.Bytes)
readers = append(readers, tailSegmentReader)
}
segmentReader := io.MultiReader(readers...)
headers, err := file.conn.ObjectPut(ctx, file.segmentContainer, segmentName, segmentReader, true, "", file.contentType, nil)
if err != nil {
return nil, 0, err
}
return &Object{Name: segmentName, Bytes: int64(segmentSize), Hash: headers["Etag"]}, sizeToRead, nil
}
func withBuffer(opts *LargeObjectOpts, lo LargeObjectFile) LargeObjectFile {
if !opts.NoBuffer {
return &bufferedLargeObjectFile{
LargeObjectFile: lo,
bw: bufio.NewWriterSize(lo, int(opts.ChunkSize)),
}
}
return lo
}
type bufferedLargeObjectFile struct {
LargeObjectFile
bw *bufio.Writer
}
func (blo *bufferedLargeObjectFile) Close() error {
return blo.CloseWithContext(context.Background())
}
func (blo *bufferedLargeObjectFile) CloseWithContext(ctx context.Context) error {
err := blo.bw.Flush()
if err != nil {
return err
}
return blo.LargeObjectFile.CloseWithContext(ctx)
}
func (blo *bufferedLargeObjectFile) WriteWithContext(_ context.Context, p []byte) (n int, err error) {
return blo.Write(p)
}
func (blo *bufferedLargeObjectFile) Write(p []byte) (n int, err error) {
return blo.bw.Write(p)
}
func (blo *bufferedLargeObjectFile) Seek(offset int64, whence int) (int64, error) {
err := blo.bw.Flush()
if err != nil {
return 0, err
}
return blo.LargeObjectFile.Seek(offset, whence)
}
func (blo *bufferedLargeObjectFile) Size() int64 {
return blo.LargeObjectFile.Size() + int64(blo.bw.Buffered())
}
func (blo *bufferedLargeObjectFile) Flush(ctx context.Context) error {
err := blo.bw.Flush()
if err != nil {
return err
}
return blo.LargeObjectFile.Flush(ctx)
}

@@ -0,0 +1,174 @@
// Metadata manipulation in and out of Headers
package swift
import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
)
// Metadata stores account, container or object metadata.
type Metadata map[string]string
// Metadata gets the Metadata starting with the metaPrefix out of the Headers.
//
// The keys in the Metadata will be converted to lower case
func (h Headers) Metadata(metaPrefix string) Metadata {
m := Metadata{}
metaPrefix = http.CanonicalHeaderKey(metaPrefix)
for key, value := range h {
if strings.HasPrefix(key, metaPrefix) {
metaKey := strings.ToLower(key[len(metaPrefix):])
m[metaKey] = value
}
}
return m
}
// AccountMetadata converts Headers from account to a Metadata.
//
// The keys in the Metadata will be converted to lower case.
func (h Headers) AccountMetadata() Metadata {
return h.Metadata("X-Account-Meta-")
}
// ContainerMetadata converts Headers from container to a Metadata.
//
// The keys in the Metadata will be converted to lower case.
func (h Headers) ContainerMetadata() Metadata {
return h.Metadata("X-Container-Meta-")
}
// ObjectMetadata converts Headers from object to a Metadata.
//
// The keys in the Metadata will be converted to lower case.
func (h Headers) ObjectMetadata() Metadata {
return h.Metadata("X-Object-Meta-")
}
// Headers convert the Metadata starting with the metaPrefix into a
// Headers.
//
// The keys in the Metadata will be converted from lower case to http
// Canonical (see http.CanonicalHeaderKey).
func (m Metadata) Headers(metaPrefix string) Headers {
h := Headers{}
for key, value := range m {
key = http.CanonicalHeaderKey(metaPrefix + key)
h[key] = value
}
return h
}
// AccountHeaders converts the Metadata for the account.
func (m Metadata) AccountHeaders() Headers {
return m.Headers("X-Account-Meta-")
}
// ContainerHeaders converts the Metadata for the container.
func (m Metadata) ContainerHeaders() Headers {
return m.Headers("X-Container-Meta-")
}
// ObjectHeaders converts the Metadata for the object.
func (m Metadata) ObjectHeaders() Headers {
return m.Headers("X-Object-Meta-")
}
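// Illustrative sketch (editor's note, not part of the upstream file):
// round-tripping object metadata through the prefix helpers above.
//
//	h := Headers{"X-Object-Meta-Mtime": "1354040105.123456789"}
//	m := h.ObjectMetadata() // Metadata{"mtime": "1354040105.123456789"}
//	m["color"] = "blue"
//	h2 := m.ObjectHeaders() // Headers{"X-Object-Meta-Mtime": "...", "X-Object-Meta-Color": "blue"}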
// Turns a number of nanoseconds into a floating point string of seconds
//
// Trims trailing zeros and is guaranteed to be perfectly accurate
func nsToFloatString(ns int64) string {
if ns < 0 {
return "-" + nsToFloatString(-ns)
}
result := fmt.Sprintf("%010d", ns)
split := len(result) - 9
result, decimals := result[:split], result[split:]
decimals = strings.TrimRight(decimals, "0")
if decimals != "" {
result += "."
result += decimals
}
return result
}
// Turns a floating point string in seconds into a ns integer
//
// Guaranteed to be perfectly accurate
func floatStringToNs(s string) (int64, error) {
const zeros = "000000000"
if point := strings.IndexRune(s, '.'); point >= 0 {
tail := s[point+1:]
if fill := 9 - len(tail); fill < 0 {
tail = tail[:9]
} else {
tail += zeros[:fill]
}
s = s[:point] + tail
} else if len(s) > 0 { // Make sure empty string produces an error
s += zeros
}
return strconv.ParseInt(s, 10, 64)
}
// FloatStringToTime converts a floating point number string to a time.Time
//
// The string is floating point number of seconds since the epoch
// (Unix time). The number should be in fixed point format (not
// exponential), eg "1354040105.123456789" which represents the time
// "2012-11-27T18:15:05.123456789Z"
//
// Some care is taken to preserve all the accuracy in the time.Time
// (which wouldn't happen with a naive conversion through float64) so
// a round trip conversion won't change the data.
//
// If an error is returned then time will be returned as the zero time.
func FloatStringToTime(s string) (t time.Time, err error) {
ns, err := floatStringToNs(s)
if err != nil {
return
}
t = time.Unix(0, ns)
return
}
// TimeToFloatString converts a time.Time object to a floating point string
//
// The string is floating point number of seconds since the epoch
// (Unix time). The number is in fixed point format (not
// exponential), eg "1354040105.123456789" which represents the time
// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped
// from the output.
//
// Some care is taken to preserve all the accuracy in the time.Time
// (which wouldn't happen with a naive conversion through float64) so
// a round trip conversion won't change the data.
func TimeToFloatString(t time.Time) string {
return nsToFloatString(t.UnixNano())
}
// GetModTime reads a modification time (mtime) from a Metadata object
//
// This is a de facto standard (used in the official python-swiftclient
// amongst others) for storing the modification time (as read using
// os.Stat) for an object. It is stored using the key 'mtime', which
// for example when written to an object will be 'X-Object-Meta-Mtime'.
//
// If an error is returned then time will be returned as the zero time.
func (m Metadata) GetModTime() (t time.Time, err error) {
return FloatStringToTime(m["mtime"])
}
// SetModTime writes a modification time (mtime) to a Metadata object
//
// This is a de facto standard (used in the official python-swiftclient
// amongst others) for storing the modification time (as read using
// os.Stat) for an object. It is stored using the key 'mtime', which
// for example when written to an object will be 'X-Object-Meta-Mtime'.
func (m Metadata) SetModTime(t time.Time) {
m["mtime"] = TimeToFloatString(t)
}
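A minimal usage sketch for the timestamp helpers in meta.go above (editor's illustration; the example value comes from the doc comments, the surrounding program is assumed):
package main

import (
	"fmt"

	"github.com/ncw/swift/v2"
)

func main() {
	// Round trip with no loss of precision: string -> time.Time -> string.
	t, err := swift.FloatStringToTime("1354040105.123456789")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC())                    // 2012-11-27 18:15:05.123456789 +0000 UTC
	fmt.Println(swift.TimeToFloatString(t)) // 1354040105.123456789

	// The same representation backs the de facto "mtime" metadata key.
	m := swift.Metadata{}
	m.SetModTime(t)
	fmt.Println(m.ObjectHeaders()) // map[X-Object-Meta-Mtime:1354040105.123456789]
}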

@ -0,0 +1,55 @@
Notes on Go Swift
=================
Make a builder style interface like the Google Go APIs? Advantages
are that it is easy to add named methods to the service object to do
specific things. Slightly less efficient. Not sure about how to
return extra stuff though - in an object?
Make a container struct so these could be methods on it?
Make noResponse check for 204?
Make storage public so it can be extended easily?
Rename to go-swift to match user agent string?
Reconnect on auth error - 401 when token expires isn't tested
Make more api compatible with python cloudfiles?
Retry operations on timeout / network errors?
- also 408 error
- GET requests only?
Make Connection thread safe - take a write lock whenever it is changed and a read lock whenever it is read
Add extra headers field to Connection (for via etc)
Make errors use an error hierarchy so they can be caught with a type assertion
Error(...)
ObjectCorrupted{ Error }
Make a Debug flag in connection for logging stuff
Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc
Object range
Object create, update with X-Delete-At or X-Delete-After
Large object support
- check uploads are less than 5GB in normal mode?
Access control CORS?
Swift client retries and backs off for all types of errors
Implement net error interface?
type Error interface {
error
Timeout() bool // Is the error a timeout?
Temporary() bool // Is the error temporary?
}

@ -0,0 +1,185 @@
package swift
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/url"
"os"
)
// StaticLargeObjectCreateFile represents an open static large object
type StaticLargeObjectCreateFile struct {
largeObjectCreateFile
}
// SLONotSupported is returned as an error when Static Large Objects are not supported.
//
//nolint:stylecheck
var SLONotSupported = errors.New("SLO not supported")
type swiftSegment struct {
Path string `json:"path,omitempty"`
Etag string `json:"etag,omitempty"`
Size int64 `json:"size_bytes,omitempty"`
// When uploading a manifest, the attributes must be named `path`, `etag` and `size_bytes`
// but when querying the JSON content of a manifest with the `multipart-manifest=get`
// parameter, Swift names those attributes `name`, `hash` and `bytes`.
// We use all the different attribute names in this structure to be able to use
// the same structure for both uploading and retrieving.
Name string `json:"name,omitempty"`
Hash string `json:"hash,omitempty"`
Bytes int64 `json:"bytes,omitempty"`
ContentType string `json:"content_type,omitempty"`
LastModified string `json:"last_modified,omitempty"`
}
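// For reference (editor's note derived from the struct tags above; the example
// values are illustrative): a manifest uploaded with multipart-manifest=put is
// a JSON array like
//
//	[{"path": "segments/big/0001", "etag": "d41d8cd98f00b204e9800998ecf8427e", "size_bytes": 1048576}]
//
// whereas reading the same manifest back with multipart-manifest=get returns
// entries named like
//
//	[{"name": "/segments/big/0001", "hash": "d41d8cd98f00b204e9800998ecf8427e", "bytes": 1048576, "content_type": "application/octet-stream"}]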
// StaticLargeObjectCreateFile creates a static large object returning
// an object which satisfies io.Writer, io.Seeker, io.Closer and
// io.ReaderFrom. The flags are as passed to the largeObjectCreate
// method.
func (c *Connection) StaticLargeObjectCreateFile(ctx context.Context, opts *LargeObjectOpts) (LargeObjectFile, error) {
info, err := c.cachedQueryInfo(ctx)
if err != nil || !info.SupportsSLO() {
return nil, SLONotSupported
}
realMinChunkSize := info.SLOMinSegmentSize()
if realMinChunkSize > opts.MinChunkSize {
opts.MinChunkSize = realMinChunkSize
}
lo, err := c.largeObjectCreate(ctx, opts)
if err != nil {
return nil, err
}
return withBuffer(opts, &StaticLargeObjectCreateFile{
largeObjectCreateFile: *lo,
}), nil
}
// StaticLargeObjectCreate creates or truncates an existing static
// large object returning a writeable object. This sets opts.Flags to
// an appropriate value before calling StaticLargeObjectCreateFile.
func (c *Connection) StaticLargeObjectCreate(ctx context.Context, opts *LargeObjectOpts) (LargeObjectFile, error) {
opts.Flags = os.O_TRUNC | os.O_CREATE
return c.StaticLargeObjectCreateFile(ctx, opts)
}
// StaticLargeObjectDelete deletes a static large object and all of its segments.
func (c *Connection) StaticLargeObjectDelete(ctx context.Context, container string, path string) error {
info, err := c.cachedQueryInfo(ctx)
if err != nil || !info.SupportsSLO() {
return SLONotSupported
}
return c.LargeObjectDelete(ctx, container, path)
}
// StaticLargeObjectMove moves a static large object from srcContainer, srcObjectName to dstContainer, dstObjectName
func (c *Connection) StaticLargeObjectMove(ctx context.Context, srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
swiftInfo, err := c.cachedQueryInfo(ctx)
if err != nil || !swiftInfo.SupportsSLO() {
return SLONotSupported
}
info, headers, err := c.Object(ctx, srcContainer, srcObjectName)
if err != nil {
return err
}
container, segments, err := c.getAllSegments(ctx, srcContainer, srcObjectName, headers)
if err != nil {
return err
}
// copy only metadata during move (other headers might not be safe for copying)
headers = headers.ObjectMetadata().ObjectHeaders()
if err := c.createSLOManifest(ctx, dstContainer, dstObjectName, info.ContentType, container, segments, headers); err != nil {
return err
}
if err := c.ObjectDelete(ctx, srcContainer, srcObjectName); err != nil {
return err
}
return nil
}
// createSLOManifest creates a static large object manifest
func (c *Connection) createSLOManifest(ctx context.Context, container string, path string, contentType string, segmentContainer string, segments []Object, h Headers) error {
sloSegments := make([]swiftSegment, len(segments))
for i, segment := range segments {
sloSegments[i].Path = fmt.Sprintf("%s/%s", segmentContainer, segment.Name)
sloSegments[i].Etag = segment.Hash
sloSegments[i].Size = segment.Bytes
}
content, err := json.Marshal(sloSegments)
if err != nil {
return err
}
values := url.Values{}
values.Set("multipart-manifest", "put")
if _, err := c.objectPut(ctx, container, path, bytes.NewBuffer(content), false, "", contentType, h, values); err != nil {
return err
}
return nil
}
func (file *StaticLargeObjectCreateFile) Close() error {
return file.CloseWithContext(context.Background())
}
func (file *StaticLargeObjectCreateFile) CloseWithContext(ctx context.Context) error {
return file.Flush(ctx)
}
func (file *StaticLargeObjectCreateFile) Flush(ctx context.Context) error {
if err := file.conn.createSLOManifest(ctx, file.container, file.objectName, file.contentType, file.segmentContainer, file.segments, file.headers); err != nil {
return err
}
return file.conn.waitForSegmentsToShowUp(ctx, file.container, file.objectName, file.Size())
}
func (c *Connection) getAllSLOSegments(ctx context.Context, container, path string) (string, []Object, error) {
var (
segmentList []swiftSegment
segments []Object
segPath string
segmentContainer string
)
values := url.Values{}
values.Set("multipart-manifest", "get")
file, _, err := c.objectOpen(ctx, container, path, true, nil, values)
if err != nil {
return "", nil, err
}
content, err := ioutil.ReadAll(file)
if err != nil {
return "", nil, err
}
err = json.Unmarshal(content, &segmentList)
if err != nil {
return "", nil, err
}
for _, segment := range segmentList {
segmentContainer, segPath, err = parseFullPath(segment.Name[1:])
if err != nil {
return "", nil, err
}
segments = append(segments, Object{
Name: segPath,
Bytes: segment.Bytes,
Hash: segment.Hash,
})
}
return segmentContainer, segments, nil
}
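An end-to-end sketch of the SLO API above (editor's illustration; the Connection fields and the LargeObjectOpts field names are assumptions taken from the upstream package, not shown in this diff):
package main

import (
	"context"
	"strings"

	"github.com/ncw/swift/v2"
)

func main() {
	ctx := context.Background()
	// Connection and auth details are placeholders.
	c := swift.Connection{UserName: "user", ApiKey: "key", AuthUrl: "https://auth.example.com/v3"}
	if err := c.Authenticate(ctx); err != nil {
		panic(err)
	}

	// StaticLargeObjectCreate sets O_TRUNC|O_CREATE and enforces the server's
	// SLO minimum segment size; Flush/Close uploads the JSON manifest with
	// multipart-manifest=put.
	out, err := c.StaticLargeObjectCreate(ctx, &swift.LargeObjectOpts{
		Container:   "my-container", // assumed field names from upstream largeobjects.go
		ObjectName:  "big/object",
		ContentType: "application/octet-stream",
		ChunkSize:   64 << 20, // 64 MiB segments
	})
	if err != nil {
		panic(err)
	}
	if _, err := out.Write([]byte(strings.Repeat("x", 1024))); err != nil {
		panic(err)
	}
	if err := out.Close(); err != nil {
		panic(err)
	}
}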

File diff suppressed because it is too large

@ -0,0 +1,58 @@
package swift
import (
"io"
"time"
)
// An io.ReadCloser which obeys an idle timeout
type timeoutReader struct {
reader io.ReadCloser
timeout time.Duration
cancel func()
}
// Returns a wrapper around the reader which obeys an idle
// timeout. The cancel function is called if the timeout happens
func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader {
return &timeoutReader{
reader: reader,
timeout: timeout,
cancel: cancel,
}
}
// Read reads up to len(p) bytes into p
//
// Waits at most timeout for the read to complete, otherwise calls cancel and returns TimeoutError
func (t *timeoutReader) Read(p []byte) (int, error) {
// FIXME limit the amount of data read in one chunk so as to not exceed the timeout?
// Do the read in the background
type result struct {
n int
err error
}
done := make(chan result, 1)
go func() {
n, err := t.reader.Read(p)
done <- result{n, err}
}()
// Wait for the read or the timeout
timer := time.NewTimer(t.timeout)
defer timer.Stop()
select {
case r := <-done:
return r.n, r.err
case <-timer.C:
t.cancel()
return 0, TimeoutError
}
}
// Close closes the underlying reader
func (t *timeoutReader) Close() error {
return t.reader.Close()
}
// Check it satisfies the interface
var _ io.ReadCloser = &timeoutReader{}
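// A within-package sketch (editor's illustration) of how the idle-timeout
// wrapper above might be wired up; the helper name is hypothetical and an
// ioutil import is assumed.
func readWithIdleTimeout(body io.ReadCloser, cancelRequest func()) ([]byte, error) {
	// If any single Read stalls for more than a minute, cancelRequest is
	// invoked (for example to abort the underlying HTTP request) and Read
	// returns TimeoutError.
	tr := newTimeoutReader(body, time.Minute, cancelRequest)
	defer tr.Close()
	return ioutil.ReadAll(tr)
}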

@ -0,0 +1,22 @@
#!/bin/bash
set -e
if [ "${TRAVIS_PULL_REQUEST}" = "true" ]; then
exit 0
fi
if [ "${TEST_REAL_SERVER}" = "rackspace" ] && [ ! -z "${RACKSPACE_APIKEY}" ]; then
echo "Running tests pointing to Rackspace"
export SWIFT_API_KEY=$RACKSPACE_APIKEY
export SWIFT_API_USER=$RACKSPACE_USER
export SWIFT_AUTH_URL=$RACKSPACE_AUTH
go test ./...
fi
if [ "${TEST_REAL_SERVER}" = "memset" ] && [ ! -z "${MEMSET_APIKEY}" ]; then
echo "Running tests pointing to Memset"
export SWIFT_API_KEY=$MEMSET_APIKEY
export SWIFT_API_USER=$MEMSET_USER
export SWIFT_AUTH_URL=$MEMSET_AUTH
go test
fi

@ -0,0 +1,55 @@
package swift
import (
"io"
"time"
)
var watchdogChunkSize = 1 << 20 // 1 MiB
// An io.Reader which resets a watchdog timer whenever data is read
type watchdogReader struct {
timeout time.Duration
reader io.Reader
timer *time.Timer
chunkSize int
}
// Returns a new reader which will kick the watchdog timer whenever data is read
func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader {
return &watchdogReader{
timeout: timeout,
reader: reader,
timer: timer,
chunkSize: watchdogChunkSize,
}
}
// Read reads up to len(p) bytes into p
func (t *watchdogReader) Read(p []byte) (int, error) {
// read from underlying reader in chunks not larger than t.chunkSize
// while resetting the watchdog timer before every read; the small chunk
// size ensures that the timer does not fire when reading a large amount of
// data from a slow connection
start := 0
end := len(p)
for start < end {
length := end - start
if length > t.chunkSize {
length = t.chunkSize
}
resetTimer(t.timer, t.timeout)
n, err := t.reader.Read(p[start : start+length])
start += n
if n == 0 || err != nil {
return start, err
}
}
resetTimer(t.timer, t.timeout)
return start, nil
}
// Check it satisfies the interface
var _ io.Reader = &watchdogReader{}
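// A within-package sketch (editor's illustration) of the watchdog reader above
// guarding an upload body; the helper name and the abort wiring are
// hypothetical, and resetTimer is defined elsewhere in the package, outside
// this excerpt.
func guardUploadBody(body io.Reader, timeout time.Duration, abort func()) io.Reader {
	// The timer only fires (calling abort) if no data at all moves for a full
	// timeout period; each chunked Read above resets it first.
	timer := time.AfterFunc(timeout, abort)
	return newWatchdogReader(body, timeout, timer)
}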

@ -1332,6 +1332,9 @@ github.com/mwitkow/go-conntrack
# github.com/ncw/swift v1.0.53
## explicit
github.com/ncw/swift
# github.com/ncw/swift/v2 v2.0.2
## explicit; go 1.15
github.com/ncw/swift/v2
# github.com/oklog/run v1.1.0
## explicit; go 1.13
github.com/oklog/run
