From 1fc00fa02d7c2bcaebb2b32d0a97ac8a8dc34e97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E5=9B=BD=E5=BF=A0?= Date: Thu, 9 Feb 2023 21:32:31 +0800 Subject: [PATCH] [new] chunk backend: Integrate Alibaba Cloud oss (#8410) --- CHANGELOG.md | 1 + docs/sources/configuration/_index.md | 42 + docs/sources/configuration/examples.md | 23 + .../alibaba-cloud-storage-config.yaml | 17 + go.mod | 1 + go.sum | 3 + .../loki_micro_services_delete_test.go | 2 +- pkg/loki/common/common.go | 17 +- pkg/ruler/base/storage.go | 15 +- .../chunk/client/alibaba/oss_object_client.go | 164 ++ pkg/storage/config/schema_config.go | 1 + pkg/storage/factory.go | 38 +- tools/doc-generator/parse/root_blocks.go | 6 + .../aliyun/aliyun-oss-go-sdk/LICENSE | 14 + .../aliyun/aliyun-oss-go-sdk/oss/auth.go | 190 ++ .../aliyun/aliyun-oss-go-sdk/oss/bucket.go | 1289 +++++++++++ .../aliyun/aliyun-oss-go-sdk/oss/client.go | 2056 +++++++++++++++++ .../aliyun/aliyun-oss-go-sdk/oss/conf.go | 207 ++ .../aliyun/aliyun-oss-go-sdk/oss/conn.go | 852 +++++++ .../aliyun/aliyun-oss-go-sdk/oss/const.go | 258 +++ .../aliyun/aliyun-oss-go-sdk/oss/crc.go | 123 + .../aliyun/aliyun-oss-go-sdk/oss/download.go | 567 +++++ .../aliyun/aliyun-oss-go-sdk/oss/error.go | 94 + .../aliyun-oss-go-sdk/oss/limit_reader_1_6.go | 28 + .../aliyun-oss-go-sdk/oss/limit_reader_1_7.go | 90 + .../aliyun-oss-go-sdk/oss/livechannel.go | 257 +++ .../aliyun/aliyun-oss-go-sdk/oss/mime.go | 572 +++++ .../aliyun/aliyun-oss-go-sdk/oss/model.go | 69 + .../aliyun/aliyun-oss-go-sdk/oss/multicopy.go | 474 ++++ .../aliyun/aliyun-oss-go-sdk/oss/multipart.go | 305 +++ .../aliyun/aliyun-oss-go-sdk/oss/option.go | 689 ++++++ .../aliyun/aliyun-oss-go-sdk/oss/progress.go | 116 + .../aliyun-oss-go-sdk/oss/redirect_1_6.go | 11 + .../aliyun-oss-go-sdk/oss/redirect_1_7.go | 12 + .../aliyun-oss-go-sdk/oss/select_object.go | 197 ++ .../oss/select_object_type.go | 364 +++ .../aliyun-oss-go-sdk/oss/transport_1_6.go | 41 + .../aliyun-oss-go-sdk/oss/transport_1_7.go | 43 + .../aliyun/aliyun-oss-go-sdk/oss/type.go | 1262 ++++++++++ .../aliyun/aliyun-oss-go-sdk/oss/upload.go | 552 +++++ .../aliyun/aliyun-oss-go-sdk/oss/utils.go | 522 +++++ vendor/modules.txt | 3 + 42 files changed, 11566 insertions(+), 21 deletions(-) create mode 100644 docs/sources/configuration/examples/alibaba-cloud-storage-config.yaml create mode 100644 pkg/storage/chunk/client/alibaba/oss_object_client.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go create mode 100644 
vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
 create mode 100644 vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d33e9aa950..d9784d0687 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -465,6 +465,7 @@ to include only the most relevant.
 * [5544](https://github.com/grafana/loki/pull/5544) **ssncferreira**: Update vectorAggEvaluator to fail for expressions without grouping
 * [5543](https://github.com/grafana/loki/pull/5543) **cyriltovena**: update loki go version to 1.17.8
 * [5450](https://github.com/grafana/loki/pull/5450) **BenoitKnecht**: pkg/ruler/base: Add external_labels option
+* [5522](https://github.com/grafana/loki/pull/5522) **liguozhong**: chunk backend: Integrate Alibaba Cloud oss
 * [5484](https://github.com/grafana/loki/pull/5484) **sandeepsukhani**: Add support for per user index query readiness with limits overrides
 * [5719](https://github.com/grafana/loki/pull/5719) **kovaxur**: Loki can use both basic-auth and tenant-id
 * [5358](https://github.com/grafana/loki/pull/5358) **DylanGuedes**: Add `RingMode` support to `IndexGateway`
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
index af97aff69b..9642dbe8c3 100644
--- a/docs/sources/configuration/_index.md
+++ b/docs/sources/configuration/_index.md
@@ -778,6 +778,10 @@ storage:
   # The CLI flags prefix for this block configuration is: ruler.storage
   [azure: <azure_storage_config>]
 
+  # Configures backend rule storage for AlibabaCloud Object Storage (OSS).
+  # The CLI flags prefix for this block configuration is: ruler
+  [alibabacloud: <alibabacloud_storage_config>]
+
   # Configures backend rule storage for GCS.
   # The CLI flags prefix for this block configuration is: ruler.storage
   [gcs: <gcs_storage_config>]
@@ -1474,6 +1478,11 @@ ring:
 The `storage_config` block configures one of many possible stores for both the index and chunks. Which configuration to be picked should be defined in schema_config block.
 
 ```yaml
+# The alibabacloud_storage_config block configures the connection to Alibaba
+# Cloud Storage object storage backend.
+# The CLI flags prefix for this block configuration is: common
+[alibabacloud: <alibabacloud_storage_config>]
+
 # The aws_storage_config block configures the connection to dynamoDB and S3
 # object storage. Either one of them or both can be configured.
 [aws: <aws_storage_config>]
@@ -2807,6 +2816,10 @@ storage:
   # The CLI flags prefix for this block configuration is: common.storage
   [azure: <azure_storage_config>]
 
+  # The alibabacloud_storage_config block configures the connection to Alibaba
+  # Cloud Storage object storage backend.
+  [alibabacloud: <alibabacloud_storage_config>]
+
   # The bos_storage_config block configures the connection to Baidu Object
   # Storage (BOS) object storage backend.
   # The CLI flags prefix for this block configuration is: common.storage
@@ -3796,6 +3809,33 @@ The `azure_storage_config` block configures the connection to Azure object stora
 [max_retry_delay: <duration> | default = 500ms]
 ```
 
+### alibabacloud_storage_config
+
+The `alibabacloud_storage_config` block configures the connection to Alibaba Cloud Storage object storage backend. The supported CLI flags `<prefix>` used to reference this configuration block are:
+
+- `common`
+- `ruler`
+
+&nbsp;
+
+```yaml
+# Name of OSS bucket.
+# CLI flag: -common.storage.oss.bucketname
+[bucket: <string> | default = ""]
+
+# OSS endpoint to connect to.
+# CLI flag: -common.storage.oss.endpoint
+[endpoint: <string> | default = ""]
+
+# Alibaba Cloud Access Key ID.
+# CLI flag: -common.storage.oss.access-key-id
+[access_key_id: <string> | default = ""]
+
+# Alibaba Cloud Secret Access Key.
+# CLI flag: -common.storage.oss.secret-access-key
+[secret_access_key: <string> | default = ""]
+```
+
 ### gcs_storage_config
 
 The `gcs_storage_config` block configures the connection to Google Cloud Storage object storage backend. The supported CLI flags `<prefix>` used to reference this configuration block are:
@@ -4091,6 +4131,8 @@ Named store from this example can be used by setting object_store to store-1 in
 [gcs: <map of string to gcs_storage_config>]
 
+[alibabacloud: <map of string to alibabacloud_storage_config>]
+
 [swift: <map of string to swift_storage_config>]
 ```
diff --git a/docs/sources/configuration/examples.md b/docs/sources/configuration/examples.md
index 6410a870c8..34a0350a9f 100644
--- a/docs/sources/configuration/examples.md
+++ b/docs/sources/configuration/examples.md
@@ -272,3 +272,26 @@ storage_config:
 ```
+
+## alibaba-cloud-storage-config.yaml
+
+```yaml
+# This partial configuration uses Alibaba for chunk storage
+
+schema_config:
+  configs:
+    - from: 2020-05-15
+      object_store: alibabacloud
+      schema: v11
+      index:
+        prefix: loki_index_
+        period: 168h
+
+storage_config:
+  alibabacloud:
+    bucket: <bucket>
+    endpoint: <endpoint>
+    access_key_id: <access_key_id>
+    secret_access_key: <secret_access_key>
+```
+
diff --git a/docs/sources/configuration/examples/alibaba-cloud-storage-config.yaml b/docs/sources/configuration/examples/alibaba-cloud-storage-config.yaml
new file mode 100644
index 0000000000..56ea9cd9c1
--- /dev/null
+++ b/docs/sources/configuration/examples/alibaba-cloud-storage-config.yaml
@@ -0,0 +1,17 @@
+# This partial configuration uses Alibaba for chunk storage
+
+schema_config:
+  configs:
+    - from: 2020-05-15
+      object_store: alibabacloud
+      schema: v11
+      index:
+        prefix: loki_index_
+        period: 168h
+
+storage_config:
+  alibabacloud:
+    bucket: <bucket>
+    endpoint: <endpoint>
+    access_key_id: <access_key_id>
+    secret_access_key: <secret_access_key>
diff --git a/go.mod b/go.mod
index 4eed6b2db5..01284a1579 100644
--- a/go.mod
+++ b/go.mod
@@ -15,6 +15,7 @@ require (
 	github.com/Shopify/sarama v1.38.1
 	github.com/Workiva/go-datastructures v1.0.53
 	github.com/alicebob/miniredis/v2 v2.30.0
+	github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible
 	github.com/aws/aws-sdk-go v1.44.187
 	github.com/baidubce/bce-sdk-go v0.9.141
 	github.com/bmatcuk/doublestar v1.3.4
diff --git a/go.sum b/go.sum
index db37ee3c55..c5d17bca86 100644
--- a/go.sum
+++ b/go.sum
@@ -198,6 +198,7 @@ github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGn
 github.com/alicebob/miniredis/v2 v2.30.0 h1:uA3uhDbCxfO9+DI/DuGeAMr9qI+noVWwGPNTFuKID5M=
 github.com/alicebob/miniredis/v2 v2.30.0/go.mod h1:84TWKZlxYkfgMucPBf5SOQBYJceZeQRFIaQgNMiCX6Q=
 github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible h1:9gWa46nstkJ9miBReJcN8Gq34cBFbzSpQZVVT9N09TM=
+github.com/aliyun/aliyun-oss-go-sdk
v2.2.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -253,6 +254,7 @@ github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnw github.com/axiomhq/hyperloglog v0.0.0-20180317131949-fe9507de0228/go.mod h1:IOXAcuKIFq/mDyuQ4wyJuJ79XLMsmLM+5RdQ+vWrL7o= github.com/baidubce/bce-sdk-go v0.9.141 h1:EV5BH5lfymIGPSmYDo9xYdsVlvWAW6nFeiA6t929zBE= github.com/baidubce/bce-sdk-go v0.9.141/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= @@ -1260,6 +1262,7 @@ github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WS github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10 h1:wsfMs0iv+MJiViM37qh5VEKISi3/ZUq2nNKNdqmumAs= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= diff --git a/integration/loki_micro_services_delete_test.go b/integration/loki_micro_services_delete_test.go index 27e85408c5..20c16fc121 100644 --- a/integration/loki_micro_services_delete_test.go +++ b/integration/loki_micro_services_delete_test.go @@ -258,7 +258,7 @@ func TestMicroServicesDeleteRequest(t *testing.T) { return false } return true - }, 10*time.Second, 1*time.Second) + }, 20*time.Second, 1*time.Second) // Check metrics metrics, err := cliCompactor.Metrics() diff --git a/pkg/loki/common/common.go b/pkg/loki/common/common.go index 4a9adcc0a2..0d272c95f2 100644 --- a/pkg/loki/common/common.go +++ b/pkg/loki/common/common.go @@ -2,6 +2,7 @@ package common import ( "flag" + "github.com/grafana/loki/pkg/storage/chunk/client/alibaba" "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/netutil" @@ -66,19 +67,21 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { } type Storage struct { - S3 aws.S3Config `yaml:"s3"` - GCS gcp.GCSConfig `yaml:"gcs"` - Azure azure.BlobStorageConfig `yaml:"azure"` - BOS baidubce.BOSStorageConfig `yaml:"bos"` - Swift openstack.SwiftConfig `yaml:"swift"` - FSConfig FilesystemConfig `yaml:"filesystem"` - Hedging hedging.Config `yaml:"hedging"` + S3 aws.S3Config `yaml:"s3"` + GCS gcp.GCSConfig `yaml:"gcs"` + Azure azure.BlobStorageConfig `yaml:"azure"` + AlibabaCloud alibaba.OssConfig `yaml:"alibabacloud"` + BOS baidubce.BOSStorageConfig `yaml:"bos"` + Swift openstack.SwiftConfig `yaml:"swift"` + FSConfig FilesystemConfig 
`yaml:"filesystem"` + Hedging hedging.Config `yaml:"hedging"` } func (s *Storage) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { s.S3.RegisterFlagsWithPrefix(prefix, f) s.GCS.RegisterFlagsWithPrefix(prefix, f) s.Azure.RegisterFlagsWithPrefix(prefix, f) + s.AlibabaCloud.RegisterFlagsWithPrefix(prefix, f) s.Swift.RegisterFlagsWithPrefix(prefix, f) s.BOS.RegisterFlagsWithPrefix(prefix, f) s.FSConfig.RegisterFlagsWithPrefix(prefix, f) diff --git a/pkg/ruler/base/storage.go b/pkg/ruler/base/storage.go index 7e9f3cf28b..5ccb19fce6 100644 --- a/pkg/ruler/base/storage.go +++ b/pkg/ruler/base/storage.go @@ -19,6 +19,7 @@ import ( "github.com/grafana/loki/pkg/storage" "github.com/grafana/loki/pkg/storage/bucket" "github.com/grafana/loki/pkg/storage/chunk/client" + "github.com/grafana/loki/pkg/storage/chunk/client/alibaba" "github.com/grafana/loki/pkg/storage/chunk/client/aws" "github.com/grafana/loki/pkg/storage/chunk/client/azure" "github.com/grafana/loki/pkg/storage/chunk/client/baidubce" @@ -33,12 +34,13 @@ type RuleStoreConfig struct { Type string `yaml:"type"` // Object Storage Configs - Azure azure.BlobStorageConfig `yaml:"azure" doc:"description=Configures backend rule storage for Azure."` - GCS gcp.GCSConfig `yaml:"gcs" doc:"description=Configures backend rule storage for GCS."` - S3 aws.S3Config `yaml:"s3" doc:"description=Configures backend rule storage for S3."` - BOS baidubce.BOSStorageConfig `yaml:"bos" doc:"description=Configures backend rule storage for Baidu Object Storage (BOS)."` - Swift openstack.SwiftConfig `yaml:"swift" doc:"description=Configures backend rule storage for Swift."` - Local local.Config `yaml:"local" doc:"description=Configures backend rule storage for a local file system directory."` + Azure azure.BlobStorageConfig `yaml:"azure" doc:"description=Configures backend rule storage for Azure."` + AlibabaCloud alibaba.OssConfig `yaml:"alibabacloud" doc:"description=Configures backend rule storage for AlibabaCloud Object Storage (OSS)."` + GCS gcp.GCSConfig `yaml:"gcs" doc:"description=Configures backend rule storage for GCS."` + S3 aws.S3Config `yaml:"s3" doc:"description=Configures backend rule storage for S3."` + BOS baidubce.BOSStorageConfig `yaml:"bos" doc:"description=Configures backend rule storage for Baidu Object Storage (BOS)."` + Swift openstack.SwiftConfig `yaml:"swift" doc:"description=Configures backend rule storage for Swift."` + Local local.Config `yaml:"local" doc:"description=Configures backend rule storage for a local file system directory."` mock rulestore.RuleStore `yaml:"-"` } @@ -46,6 +48,7 @@ type RuleStoreConfig struct { // RegisterFlags registers flags. 
 func (cfg *RuleStoreConfig) RegisterFlags(f *flag.FlagSet) {
 	cfg.Azure.RegisterFlagsWithPrefix("ruler.storage.", f)
+	cfg.AlibabaCloud.RegisterFlagsWithPrefix("ruler.storage.", f)
 	cfg.GCS.RegisterFlagsWithPrefix("ruler.storage.", f)
 	cfg.S3.RegisterFlagsWithPrefix("ruler.storage.", f)
 	cfg.Swift.RegisterFlagsWithPrefix("ruler.storage.", f)
diff --git a/pkg/storage/chunk/client/alibaba/oss_object_client.go b/pkg/storage/chunk/client/alibaba/oss_object_client.go
new file mode 100644
index 0000000000..71a6f13720
--- /dev/null
+++ b/pkg/storage/chunk/client/alibaba/oss_object_client.go
@@ -0,0 +1,164 @@
+package alibaba
+
+import (
+	"context"
+	"flag"
+	"io"
+	"net/http"
+	"strconv"
+
+	"github.com/aliyun/aliyun-oss-go-sdk/oss"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/weaveworks/common/instrument"
+
+	"github.com/grafana/loki/pkg/storage/chunk/client"
+)
+
+const NoSuchKeyErr = "NoSuchKey"
+
+var ossRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{
+	Namespace: "loki",
+	Name:      "oss_request_duration_seconds",
+	Help:      "Time spent doing OSS requests.",
+	Buckets:   prometheus.ExponentialBuckets(0.005, 4, 7),
+}, []string{"operation", "status_code"}))
+
+func init() {
+	ossRequestDuration.Register()
+}
+
+type OssObjectClient struct {
+	defaultBucket *oss.Bucket
+}
+
+// OssConfig is config for the OSS Chunk Client.
+type OssConfig struct {
+	Bucket          string `yaml:"bucket"`
+	Endpoint        string `yaml:"endpoint"`
+	AccessKeyID     string `yaml:"access_key_id"`
+	SecretAccessKey string `yaml:"secret_access_key"`
+}
+
+// RegisterFlags registers flags.
+func (cfg *OssConfig) RegisterFlags(f *flag.FlagSet) {
+	cfg.RegisterFlagsWithPrefix("", f)
+}
+
+// RegisterFlagsWithPrefix registers flags with prefix.
+func (cfg *OssConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+	f.StringVar(&cfg.Bucket, prefix+"oss.bucketname", "", "Name of OSS bucket.")
+	f.StringVar(&cfg.Endpoint, prefix+"oss.endpoint", "", "OSS endpoint to connect to.")
+	f.StringVar(&cfg.AccessKeyID, prefix+"oss.access-key-id", "", "Alibaba Cloud Access Key ID.")
+	f.StringVar(&cfg.SecretAccessKey, prefix+"oss.secret-access-key", "", "Alibaba Cloud Secret Access Key.")
+}
+
+// NewOssObjectClient makes a new chunk.Client that writes chunks to OSS.
+func NewOssObjectClient(ctx context.Context, cfg OssConfig) (client.ObjectClient, error) {
+	client, err := oss.New(cfg.Endpoint, cfg.AccessKeyID, cfg.SecretAccessKey)
+	if err != nil {
+		return nil, err
+	}
+	bucket, err := client.Bucket(cfg.Bucket)
+	if err != nil {
+		return nil, err
+	}
+	return &OssObjectClient{
+		defaultBucket: bucket,
+	}, nil
+}
+
+func (s *OssObjectClient) Stop() {
+}
+
+// GetObject returns a reader and the size for the specified object key from the configured OSS bucket.
+func (s *OssObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) {
+	var resp *oss.GetObjectResult
+	var options []oss.Option
+	err := instrument.CollectedRequest(ctx, "OSS.GetObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+		var requestErr error
+		resp, requestErr = s.defaultBucket.DoGetObject(&oss.GetObjectRequest{ObjectKey: objectKey}, options)
+		if requestErr != nil {
+			return requestErr
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, 0, err
+	}
+	length := resp.Response.Headers.Get("Content-Length")
+	size, err := strconv.Atoi(length)
+	if err != nil {
+		// Close the body before bailing out so the connection is not leaked.
+		_ = resp.Response.Body.Close()
+		return nil, 0, err
+	}
+	return resp.Response.Body, int64(size), nil
+}
+
+// PutObject puts the specified bytes into the configured OSS bucket at the provided key.
+func (s *OssObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
+	return instrument.CollectedRequest(ctx, "OSS.PutObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+		if err := s.defaultBucket.PutObject(objectKey, object); err != nil {
+			return errors.Wrap(err, "failed to put oss object")
+		}
+		return nil
+	})
+
+}
+
+// List implements chunk.ObjectClient.
+func (s *OssObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
+	var storageObjects []client.StorageObject
+	var commonPrefixes []client.StorageCommonPrefix
+	marker := oss.Marker("")
+	for {
+		if ctx.Err() != nil {
+			return nil, nil, ctx.Err()
+		}
+
+		objects, err := s.defaultBucket.ListObjects(oss.Prefix(prefix), oss.Delimiter(delimiter), marker)
+		if err != nil {
+			return nil, nil, errors.Wrap(err, "list alibaba oss bucket failed")
+		}
+		marker = oss.Marker(objects.NextMarker)
+		for _, object := range objects.Objects {
+			storageObjects = append(storageObjects, client.StorageObject{
+				Key:        object.Key,
+				ModifiedAt: object.LastModified,
+			})
+		}
+		for _, object := range objects.CommonPrefixes {
+			if object != "" {
+				commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(object))
+			}
+		}
+		if !objects.IsTruncated {
+			break
+		}
+	}
+	return storageObjects, commonPrefixes, nil
+}
+
+// DeleteObject deletes the specified object key from the configured OSS bucket.
+func (s *OssObjectClient) DeleteObject(ctx context.Context, objectKey string) error {
+	return instrument.CollectedRequest(ctx, "OSS.DeleteObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+		err := s.defaultBucket.DeleteObject(objectKey)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+}
+
+// IsObjectNotFoundErr returns true if the error means the object is not found. Relevant to GetObject and DeleteObject operations.
+func (s *OssObjectClient) IsObjectNotFoundErr(err error) bool { + switch caseErr := err.(type) { + case oss.ServiceError: + if caseErr.Code == NoSuchKeyErr && caseErr.StatusCode == http.StatusNotFound { + return true + } + return false + default: + return false + } +} diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go index bcc0f39c6e..1c49983ac6 100644 --- a/pkg/storage/config/schema_config.go +++ b/pkg/storage/config/schema_config.go @@ -22,6 +22,7 @@ import ( const ( // Supported storage clients + StorageTypeAlibabaCloud = "alibabacloud" StorageTypeAWS = "aws" StorageTypeAWSDynamo = "aws-dynamo" StorageTypeAzure = "azure" diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go index 1ce69523b7..b211710e34 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -13,6 +13,7 @@ import ( "github.com/grafana/loki/pkg/storage/chunk/cache" "github.com/grafana/loki/pkg/storage/chunk/client" + "github.com/grafana/loki/pkg/storage/chunk/client/alibaba" "github.com/grafana/loki/pkg/storage/chunk/client/aws" "github.com/grafana/loki/pkg/storage/chunk/client/azure" "github.com/grafana/loki/pkg/storage/chunk/client/baidubce" @@ -59,12 +60,13 @@ type StoreLimits interface { // NamedStores helps configure additional object stores from a given storage provider type NamedStores struct { - AWS map[string]aws.StorageConfig `yaml:"aws"` - Azure map[string]azure.BlobStorageConfig `yaml:"azure"` - BOS map[string]baidubce.BOSStorageConfig `yaml:"bos"` - Filesystem map[string]local.FSConfig `yaml:"filesystem"` - GCS map[string]gcp.GCSConfig `yaml:"gcs"` - Swift map[string]openstack.SwiftConfig `yaml:"swift"` + AWS map[string]aws.StorageConfig `yaml:"aws"` + Azure map[string]azure.BlobStorageConfig `yaml:"azure"` + BOS map[string]baidubce.BOSStorageConfig `yaml:"bos"` + Filesystem map[string]local.FSConfig `yaml:"filesystem"` + GCS map[string]gcp.GCSConfig `yaml:"gcs"` + AlibabaCloud map[string]alibaba.OssConfig `yaml:"alibabacloud"` + Swift map[string]openstack.SwiftConfig `yaml:"swift"` // contains mapping from named store reference name to store type storeType map[string]string `yaml:"-"` @@ -102,7 +104,12 @@ func (ns *NamedStores) populateStoreType() error { } ns.storeType[name] = config.StorageTypeAzure } - + for name := range ns.AlibabaCloud { + if err := checkForDuplicates(name); err != nil { + return err + } + ns.storeType[name] = config.StorageTypeAlibabaCloud + } for name := range ns.BOS { if err := checkForDuplicates(name); err != nil { return err @@ -158,6 +165,7 @@ func (ns *NamedStores) validate() error { // Config chooses which storage client to use. 
type Config struct { + AlibabaStorageConfig alibaba.OssConfig `yaml:"alibabacloud"` AWSStorageConfig aws.StorageConfig `yaml:"aws"` AzureStorageConfig azure.BlobStorageConfig `yaml:"azure"` BOSStorageConfig baidubce.BOSStorageConfig `yaml:"bos"` @@ -334,6 +342,12 @@ func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, clie return nil, err } return client.NewClientWithMaxParallel(c, nil, cfg.MaxParallelGetChunk, schemaCfg), nil + case config.StorageTypeAlibabaCloud: + c, err := alibaba.NewOssObjectClient(context.Background(), cfg.AlibabaStorageConfig) + if err != nil { + return nil, err + } + return client.NewClientWithMaxParallel(c, nil, cfg.MaxParallelGetChunk, schemaCfg), nil case config.StorageTypeBOS: c, err := NewObjectClient(name, cfg, clientMetrics) if err != nil { @@ -467,6 +481,16 @@ func NewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (clie } return aws.NewS3ObjectClient(s3Cfg, cfg.Hedging) + case config.StorageTypeAlibabaCloud: + ossCfg := cfg.AlibabaStorageConfig + if namedStore != "" { + var ok bool + ossCfg, ok = cfg.NamedStores.AlibabaCloud[namedStore] + if !ok { + return nil, fmt.Errorf("Unrecognized named alibabacloud oss storage config %s", name) + } + } + return alibaba.NewOssObjectClient(context.Background(), ossCfg) case config.StorageTypeGCS: gcsCfg := cfg.GCSConfig if namedStore != "" { diff --git a/tools/doc-generator/parse/root_blocks.go b/tools/doc-generator/parse/root_blocks.go index 7d656b4b23..1c3e2b269e 100644 --- a/tools/doc-generator/parse/root_blocks.go +++ b/tools/doc-generator/parse/root_blocks.go @@ -24,6 +24,7 @@ import ( "github.com/grafana/loki/pkg/scheduler" "github.com/grafana/loki/pkg/storage" "github.com/grafana/loki/pkg/storage/chunk/cache" + "github.com/grafana/loki/pkg/storage/chunk/client/alibaba" "github.com/grafana/loki/pkg/storage/chunk/client/aws" "github.com/grafana/loki/pkg/storage/chunk/client/azure" "github.com/grafana/loki/pkg/storage/chunk/client/baidubce" @@ -200,6 +201,11 @@ var ( StructType: reflect.TypeOf(azure.BlobStorageConfig{}), Desc: "The azure_storage_config block configures the connection to Azure object storage backend.", }, + { + Name: "alibabacloud_storage_config", + StructType: reflect.TypeOf(alibaba.OssConfig{}), + Desc: "The alibabacloud_storage_config block configures the connection to Alibaba Cloud Storage object storage backend.", + }, { Name: "gcs_storage_config", StructType: reflect.TypeOf(gcp.GCSConfig{}), diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE b/vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE new file mode 100644 index 0000000000..d46e9d128c --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE @@ -0,0 +1,14 @@ +Copyright (c) 2015 aliyun.com + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go new file mode 100644 index 0000000000..bc1e4fa3f6 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go @@ -0,0 +1,190 @@ +package oss + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "fmt" + "hash" + "io" + "net/http" + "sort" + "strconv" + "strings" +) + +// headerSorter defines the key-value structure for storing the sorted data in signHeader. +type headerSorter struct { + Keys []string + Vals []string +} + +// getAdditionalHeaderKeys get exist key in http header +func (conn Conn) getAdditionalHeaderKeys(req *http.Request) ([]string, map[string]string) { + var keysList []string + keysMap := make(map[string]string) + srcKeys := make(map[string]string) + + for k := range req.Header { + srcKeys[strings.ToLower(k)] = "" + } + + for _, v := range conn.config.AdditionalHeaders { + if _, ok := srcKeys[strings.ToLower(v)]; ok { + keysMap[strings.ToLower(v)] = "" + } + } + + for k := range keysMap { + keysList = append(keysList, k) + } + sort.Strings(keysList) + return keysList, keysMap +} + +// signHeader signs the header and sets it as the authorization header. +func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) { + akIf := conn.config.GetCredentials() + authorizationStr := "" + if conn.config.AuthVersion == AuthV2 { + additionalList, _ := conn.getAdditionalHeaderKeys(req) + if len(additionalList) > 0 { + authorizationFmt := "OSS2 AccessKeyId:%v,AdditionalHeaders:%v,Signature:%v" + additionnalHeadersStr := strings.Join(additionalList, ";") + authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), additionnalHeadersStr, conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())) + } else { + authorizationFmt := "OSS2 AccessKeyId:%v,Signature:%v" + authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())) + } + } else { + // Get the final authorization string + authorizationStr = "OSS " + akIf.GetAccessKeyID() + ":" + conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()) + } + + // Give the parameter "Authorization" value + req.Header.Set(HTTPHeaderAuthorization, authorizationStr) +} + +func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string { + // Find out the "x-oss-"'s address in header of the request + ossHeadersMap := make(map[string]string) + additionalList, additionalMap := conn.getAdditionalHeaderKeys(req) + for k, v := range req.Header { + if strings.HasPrefix(strings.ToLower(k), "x-oss-") { + ossHeadersMap[strings.ToLower(k)] = v[0] + } else if conn.config.AuthVersion == AuthV2 { + if _, ok := additionalMap[strings.ToLower(k)]; ok { + ossHeadersMap[strings.ToLower(k)] = v[0] + } + } + } + hs := newHeaderSorter(ossHeadersMap) + + // Sort the ossHeadersMap by the ascending order + hs.Sort() + + // Get the canonicalizedOSSHeaders + canonicalizedOSSHeaders := "" + for i := range hs.Keys { + canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n" + } + + // Give other parameters values + // when sign URL, 
date is expires + date := req.Header.Get(HTTPHeaderDate) + contentType := req.Header.Get(HTTPHeaderContentType) + contentMd5 := req.Header.Get(HTTPHeaderContentMD5) + + // default is v1 signature + signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret)) + + // v2 signature + if conn.config.AuthVersion == AuthV2 { + signStr = req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + strings.Join(additionalList, ";") + "\n" + canonicalizedResource + h = hmac.New(func() hash.Hash { return sha256.New() }, []byte(keySecret)) + } + + // convert sign to log for easy to view + if conn.config.LogLevel >= Debug { + var signBuf bytes.Buffer + for i := 0; i < len(signStr); i++ { + if signStr[i] != '\n' { + signBuf.WriteByte(signStr[i]) + } else { + signBuf.WriteString("\\n") + } + } + conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, signBuf.String()) + } + + io.WriteString(h, signStr) + signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + return signedStr +} + +func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, keySecret string, params map[string]interface{}) string { + if params[HTTPParamAccessKeyID] == nil { + return "" + } + + canonResource := fmt.Sprintf("/%s/%s", bucketName, channelName) + canonParamsKeys := []string{} + for key := range params { + if key != HTTPParamAccessKeyID && key != HTTPParamSignature && key != HTTPParamExpires && key != HTTPParamSecurityToken { + canonParamsKeys = append(canonParamsKeys, key) + } + } + + sort.Strings(canonParamsKeys) + canonParamsStr := "" + for _, key := range canonParamsKeys { + canonParamsStr = fmt.Sprintf("%s%s:%s\n", canonParamsStr, key, params[key].(string)) + } + + expireStr := strconv.FormatInt(expiration, 10) + signStr := expireStr + "\n" + canonParamsStr + canonResource + + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret)) + io.WriteString(h, signStr) + signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil)) + return signedStr +} + +// newHeaderSorter is an additional function for function SignHeader. +func newHeaderSorter(m map[string]string) *headerSorter { + hs := &headerSorter{ + Keys: make([]string, 0, len(m)), + Vals: make([]string, 0, len(m)), + } + + for k, v := range m { + hs.Keys = append(hs.Keys, k) + hs.Vals = append(hs.Vals, v) + } + return hs +} + +// Sort is an additional function for function SignHeader. +func (hs *headerSorter) Sort() { + sort.Sort(hs) +} + +// Len is an additional function for function SignHeader. +func (hs *headerSorter) Len() int { + return len(hs.Vals) +} + +// Less is an additional function for function SignHeader. +func (hs *headerSorter) Less(i, j int) bool { + return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0 +} + +// Swap is an additional function for function SignHeader. 
+func (hs *headerSorter) Swap(i, j int) { + hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i] + hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i] +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go new file mode 100644 index 0000000000..430252c02f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go @@ -0,0 +1,1289 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/xml" + "fmt" + "hash" + "hash/crc64" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" +) + +// Bucket implements the operations of object. +type Bucket struct { + Client Client + BucketName string +} + +// PutObject creates a new object and it will overwrite the original one if it exists already. +// +// objectKey the object key in UTF-8 encoding. The length must be between 1 and 1023, and cannot start with "/" or "\". +// reader io.Reader instance for reading the data for uploading +// options the options for uploading the object. The valid options here are CacheControl, ContentDisposition, ContentEncoding +// Expires, ServerSideEncryption, ObjectACL and Meta. Refer to the link below for more details. +// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error { + opts := AddContentType(options, objectKey) + + request := &PutObjectRequest{ + ObjectKey: objectKey, + Reader: reader, + } + resp, err := bucket.DoPutObject(request, opts) + if err != nil { + return err + } + defer resp.Body.Close() + + return err +} + +// PutObjectFromFile creates a new object from the local file. +// +// objectKey object key. +// filePath the local file path to upload. +// options the options for uploading the object. Refer to the parameter options in PutObject for more details. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error { + fd, err := os.Open(filePath) + if err != nil { + return err + } + defer fd.Close() + + opts := AddContentType(options, filePath, objectKey) + + request := &PutObjectRequest{ + ObjectKey: objectKey, + Reader: fd, + } + resp, err := bucket.DoPutObject(request, opts) + if err != nil { + return err + } + defer resp.Body.Close() + + return err +} + +// DoPutObject does the actual upload work. +// +// request the request instance for uploading an object. +// options the options for uploading an object. +// +// Response the response from OSS. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) { + isOptSet, _, _ := IsOptionSet(options, HTTPHeaderContentType) + if !isOptSet { + options = AddContentType(options, request.ObjectKey) + } + + listener := GetProgressListener(options) + + params := map[string]interface{}{} + resp, err := bucket.do("PUT", request.ObjectKey, params, options, request.Reader, listener) + if err != nil { + return nil, err + } + + if bucket.GetConfig().IsEnableCRC { + err = CheckCRC(resp, "DoPutObject") + if err != nil { + return resp, err + } + } + + err = CheckRespCode(resp.StatusCode, []int{http.StatusOK}) + + return resp, err +} + +// GetObject downloads the object. +// +// objectKey the object key. 
+// options the options for downloading the object. The valid values are: Range, IfModifiedSince, IfUnmodifiedSince, IfMatch, +// IfNoneMatch, AcceptEncoding. For more details, please check out: +// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html +// +// io.ReadCloser reader instance for reading data from response. It must be called close() after the usage and only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) { + result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options) + if err != nil { + return nil, err + } + + return result.Response, nil +} + +// GetObjectToFile downloads the data to a local file. +// +// objectKey the object key to download. +// filePath the local file to store the object data. +// options the options for downloading the object. Refer to the parameter options in method GetObject for more details. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error { + tempFilePath := filePath + TempFileSuffix + + // Calls the API to actually download the object. Returns the result instance. + result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options) + if err != nil { + return err + } + defer result.Response.Close() + + // If the local file does not exist, create a new one. If it exists, overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode) + if err != nil { + return err + } + + // Copy the data to the local file path. + _, err = io.Copy(fd, result.Response.Body) + fd.Close() + if err != nil { + return err + } + + // Compares the CRC value + hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange) + encodeOpt, _ := FindOption(options, HTTPHeaderAcceptEncoding, nil) + acceptEncoding := "" + if encodeOpt != nil { + acceptEncoding = encodeOpt.(string) + } + if bucket.GetConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" { + result.Response.ClientCRC = result.ClientCRC.Sum64() + err = CheckCRC(result.Response, "GetObjectToFile") + if err != nil { + os.Remove(tempFilePath) + return err + } + } + + return os.Rename(tempFilePath, filePath) +} + +// DoGetObject is the actual API that gets the object. It's the internal function called by other public APIs. +// +// request the request to download the object. +// options the options for downloading the file. Checks out the parameter options in method GetObject. +// +// GetObjectResult the result instance of getting the object. +// error it's nil if no error, otherwise it's an error object. 
+// +func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) { + params, _ := GetRawParams(options) + resp, err := bucket.do("GET", request.ObjectKey, params, options, nil, nil) + if err != nil { + return nil, err + } + + result := &GetObjectResult{ + Response: resp, + } + + // CRC + var crcCalc hash.Hash64 + hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange) + if bucket.GetConfig().IsEnableCRC && !hasRange { + crcCalc = crc64.New(CrcTable()) + result.ServerCRC = resp.ServerCRC + result.ClientCRC = crcCalc + } + + // Progress + listener := GetProgressListener(options) + + contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64) + resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil) + + return result, nil +} + +// CopyObject copies the object inside the bucket. +// +// srcObjectKey the source object to copy. +// destObjectKey the target object to copy. +// options options for copying an object. You can specify the conditions of copy. The valid conditions are CopySourceIfMatch, +// CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, MetadataDirective. +// Also you can specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires, +// ServerSideEncryption, ObjectACL, Meta. Refer to the link below for more details : +// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) { + var out CopyObjectResult + + //first find version id + versionIdKey := "versionId" + versionId, _ := FindOption(options, versionIdKey, nil) + if versionId == nil { + options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey))) + } else { + options = DeleteOption(options, versionIdKey) + options = append(options, CopySourceVersion(bucket.BucketName, url.QueryEscape(srcObjectKey), versionId.(string))) + } + + params := map[string]interface{}{} + resp, err := bucket.do("PUT", destObjectKey, params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// CopyObjectTo copies the object to another bucket. +// +// srcObjectKey source object key. The source bucket is Bucket.BucketName . +// destBucketName target bucket name. +// destObjectKey target object name. +// options copy options, check out parameter options in function CopyObject for more details. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) { + return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...) +} + +// +// CopyObjectFrom copies the object to another bucket. +// +// srcBucketName source bucket name. +// srcObjectKey source object name. +// destObjectKey target object name. The target bucket name is Bucket.BucketName. +// options copy options. Check out parameter options in function CopyObject. +// +// error it's nil if no error, otherwise it's an error object. 
+// +func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) { + destBucketName := bucket.BucketName + var out CopyObjectResult + srcBucket, err := bucket.Client.Bucket(srcBucketName) + if err != nil { + return out, err + } + + return srcBucket.copy(srcObjectKey, destBucketName, destObjectKey, options...) +} + +func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, options ...Option) (CopyObjectResult, error) { + var out CopyObjectResult + + //first find version id + versionIdKey := "versionId" + versionId, _ := FindOption(options, versionIdKey, nil) + if versionId == nil { + options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey))) + } else { + options = DeleteOption(options, versionIdKey) + options = append(options, CopySourceVersion(bucket.BucketName, url.QueryEscape(srcObjectKey), versionId.(string))) + } + + headers := make(map[string]string) + err := handleOptions(headers, options) + if err != nil { + return out, err + } + params := map[string]interface{}{} + resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, params, headers, nil, 0, nil) + + // get response header + respHeader, _ := FindOption(options, responseHeader, nil) + if respHeader != nil { + pRespHeader := respHeader.(*http.Header) + *pRespHeader = resp.Headers + } + + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// AppendObject uploads the data in the way of appending an existing or new object. +// +// AppendObject the parameter appendPosition specifies which postion (in the target object) to append. For the first append (to a non-existing file), +// the appendPosition should be 0. The appendPosition in the subsequent calls will be the current object length. +// For example, the first appendObject's appendPosition is 0 and it uploaded 65536 bytes data, then the second call's position is 65536. +// The response header x-oss-next-append-position after each successful request also specifies the next call's append position (so the caller need not to maintain this information). +// +// objectKey the target object to append to. +// reader io.Reader. The read instance for reading the data to append. +// appendPosition the start position to append. +// destObjectProperties the options for the first appending, such as CacheControl, ContentDisposition, ContentEncoding, +// Expires, ServerSideEncryption, ObjectACL. +// +// int64 the next append position, it's valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) { + request := &AppendObjectRequest{ + ObjectKey: objectKey, + Reader: reader, + Position: appendPosition, + } + + result, err := bucket.DoAppendObject(request, options) + if err != nil { + return appendPosition, err + } + + return result.NextPosition, err +} + +// DoAppendObject is the actual API that does the object append. +// +// request the request object for appending object. +// options the options for appending object. +// +// AppendObjectResult the result object for appending object. +// error it's nil if no error, otherwise it's an error object. 
+// +func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) { + params := map[string]interface{}{} + params["append"] = nil + params["position"] = strconv.FormatInt(request.Position, 10) + headers := make(map[string]string) + + opts := AddContentType(options, request.ObjectKey) + handleOptions(headers, opts) + + var initCRC uint64 + isCRCSet, initCRCOpt, _ := IsOptionSet(options, initCRC64) + if isCRCSet { + initCRC = initCRCOpt.(uint64) + } + + listener := GetProgressListener(options) + + handleOptions(headers, opts) + resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, headers, + request.Reader, initCRC, listener) + + // get response header + respHeader, _ := FindOption(options, responseHeader, nil) + if respHeader != nil { + pRespHeader := respHeader.(*http.Header) + *pRespHeader = resp.Headers + } + + if err != nil { + return nil, err + } + defer resp.Body.Close() + + nextPosition, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderOssNextAppendPosition), 10, 64) + result := &AppendObjectResult{ + NextPosition: nextPosition, + CRC: resp.ServerCRC, + } + + if bucket.GetConfig().IsEnableCRC && isCRCSet { + err = CheckCRC(resp, "AppendObject") + if err != nil { + return result, err + } + } + + return result, nil +} + +// DeleteObject deletes the object. +// +// objectKey the object key to delete. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DeleteObject(objectKey string, options ...Option) error { + params, _ := GetRawParams(options) + resp, err := bucket.do("DELETE", objectKey, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// DeleteObjects deletes multiple objects. +// +// objectKeys the object keys to delete. +// options the options for deleting objects. +// Supported option is DeleteObjectsQuiet which means it will not return error even deletion failed (not recommended). By default it's not used. +// +// DeleteObjectsResult the result object. +// error it's nil if no error, otherwise it's an error object. 
+// +func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) { + out := DeleteObjectsResult{} + dxml := deleteXML{} + for _, key := range objectKeys { + dxml.Objects = append(dxml.Objects, DeleteObject{Key: key}) + } + + isQuiet, _ := FindOption(options, deleteObjectsQuiet, false) + dxml.Quiet = isQuiet.(bool) + + bs, err := xml.Marshal(dxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + options = append(options, ContentType(contentType)) + sum := md5.Sum(bs) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + options = append(options, ContentMD5(b64)) + + params := map[string]interface{}{} + params["delete"] = nil + params["encoding-type"] = "url" + + resp, err := bucket.do("POST", "", params, options, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + deletedResult := DeleteObjectVersionsResult{} + if !dxml.Quiet { + if err = xmlUnmarshal(resp.Body, &deletedResult); err == nil { + err = decodeDeleteObjectsResult(&deletedResult) + } + } + + // Keep compatibility:need convert to struct DeleteObjectsResult + out.XMLName = deletedResult.XMLName + for _, v := range deletedResult.DeletedObjectsDetail { + out.DeletedObjects = append(out.DeletedObjects, v.Key) + } + + return out, err +} + +// DeleteObjectVersions deletes multiple object versions. +// +// objectVersions the object keys and versions to delete. +// options the options for deleting objects. +// Supported option is DeleteObjectsQuiet which means it will not return error even deletion failed (not recommended). By default it's not used. +// +// DeleteObjectVersionsResult the result object. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DeleteObjectVersions(objectVersions []DeleteObject, options ...Option) (DeleteObjectVersionsResult, error) { + out := DeleteObjectVersionsResult{} + dxml := deleteXML{} + dxml.Objects = objectVersions + + isQuiet, _ := FindOption(options, deleteObjectsQuiet, false) + dxml.Quiet = isQuiet.(bool) + + bs, err := xml.Marshal(dxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + options = append(options, ContentType(contentType)) + sum := md5.Sum(bs) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + options = append(options, ContentMD5(b64)) + + params := map[string]interface{}{} + params["delete"] = nil + params["encoding-type"] = "url" + + resp, err := bucket.do("POST", "", params, options, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + if !dxml.Quiet { + if err = xmlUnmarshal(resp.Body, &out); err == nil { + err = decodeDeleteObjectsResult(&out) + } + } + return out, err +} + +// IsObjectExist checks if the object exists. +// +// bool flag of object's existence (true:exists; false:non-exist) when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) IsObjectExist(objectKey string, options ...Option) (bool, error) { + _, err := bucket.GetObjectMeta(objectKey, options...) + if err == nil { + return true, nil + } + + switch err.(type) { + case ServiceError: + if err.(ServiceError).StatusCode == 404 { + return false, nil + } + } + + return false, err +} + +// ListObjects lists the objects under the current bucket. +// +// options it contains all the filters for listing objects. 
+// It could specify a prefix filter on object keys, the max keys count to return and the object key marker and the delimiter for grouping object names. +// The key marker means the returned objects' key must be greater than it in lexicographic order. +// +// For example, if the bucket has 8 objects, my-object-1, my-object-11, my-object-2, my-object-21, +// my-object-22, my-object-3, my-object-31, my-object-32. If the prefix is my-object-2 (no other filters), then it returns +// my-object-2, my-object-21, my-object-22 three objects. If the marker is my-object-22 (no other filters), then it returns +// my-object-3, my-object-31, my-object-32 three objects. If the max keys is 5, then it returns 5 objects. +// The three filters could be used together to achieve filter and paging functionality. +// If the prefix is the folder name, then it could list all files under this folder (including the files under its subfolders). +// But if the delimiter is specified with '/', then it only returns that folder's files (no subfolder's files). The direct subfolders are in the commonPrefixes properties. +// For example, if the bucket has three objects fun/test.jpg, fun/movie/001.avi, fun/movie/007.avi. And if the prefix is "fun/", then it returns all three objects. +// But if the delimiter is '/', then only "fun/test.jpg" is returned as files and fun/movie/ is returned as common prefix. +// +// For common usage scenario, check out sample/list_object.go. +// +// ListObjectsResult the return value after operation succeeds (only valid when error is nil). +// +func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) { + var out ListObjectsResult + + options = append(options, EncodingType("url")) + params, err := GetRawParams(options) + if err != nil { + return out, err + } + + resp, err := bucket.do("GET", "", params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + + err = decodeListObjectsResult(&out) + return out, err +} + +// Recommend to use ListObjectsV2 to replace ListObjects +// ListOListObjectsV2bjects lists the objects under the current bucket. +// ListObjectsResultV2 the return value after operation succeeds (only valid when error is nil). +func (bucket Bucket) ListObjectsV2(options ...Option) (ListObjectsResultV2, error) { + var out ListObjectsResultV2 + + options = append(options, EncodingType("url")) + options = append(options, ListType(2)) + params, err := GetRawParams(options) + if err != nil { + return out, err + } + + resp, err := bucket.do("GET", "", params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + + err = decodeListObjectsResultV2(&out) + return out, err +} + +// ListObjectVersions lists objects of all versions under the current bucket. 
+func (bucket Bucket) ListObjectVersions(options ...Option) (ListObjectVersionsResult, error) { + var out ListObjectVersionsResult + + options = append(options, EncodingType("url")) + params, err := GetRawParams(options) + if err != nil { + return out, err + } + params["versions"] = nil + + resp, err := bucket.do("GET", "", params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + + err = decodeListObjectVersionsResult(&out) + return out, err +} + +// SetObjectMeta sets the metadata of the Object. +// +// objectKey object +// options options for setting the metadata. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires, +// ServerSideEncryption, and custom metadata. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error { + options = append(options, MetadataDirective(MetaReplace)) + _, err := bucket.CopyObject(objectKey, objectKey, options...) + return err +} + +// GetObjectDetailedMeta gets the object's detailed metadata +// +// objectKey object key. +// options the constraints of the object. Only when the object meets the requirements this method will return the metadata. Otherwise returns error. Valid options are IfModifiedSince, IfUnmodifiedSince, +// IfMatch, IfNoneMatch. For more details check out https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html +// +// http.Header object meta when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) { + params, _ := GetRawParams(options) + resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return resp.Headers, nil +} + +// GetObjectMeta gets object metadata. +// +// GetObjectMeta is more lightweight than GetObjectDetailedMeta as it only returns basic metadata including ETag +// size, LastModified. The size information is in the HTTP header Content-Length. +// +// objectKey object key +// +// http.Header the object's metadata, valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectMeta(objectKey string, options ...Option) (http.Header, error) { + params, _ := GetRawParams(options) + params["objectMeta"] = nil + //resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil, nil) + resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return resp.Headers, nil +} + +// SetObjectACL updates the object's ACL. +// +// Only the bucket's owner could update object's ACL which priority is higher than bucket's ACL. +// For example, if the bucket ACL is private and object's ACL is public-read-write. +// Then object's ACL is used and it means all users could read or write that object. +// When the object's ACL is not set, then bucket's ACL is used as the object's ACL. +// +// Object read operations include GetObject, HeadObject, CopyObject and UploadPartCopy on the source object; +// Object write operations include PutObject, PostObject, AppendObject, DeleteObject, DeleteMultipleObjects, +// CompleteMultipartUpload and CopyObject on target object. 
+
+// SetObjectACL updates the object's ACL.
+//
+// Only the bucket's owner can update an object's ACL, and the object ACL takes priority over the bucket ACL.
+// For example, if the bucket ACL is private and the object's ACL is public-read-write,
+// the object's ACL applies, which means all users can read and write that object.
+// When the object's ACL is not set, the bucket's ACL is used as the object's ACL.
+//
+// Object read operations include GetObject, HeadObject, CopyObject and UploadPartCopy on the source object;
+// Object write operations include PutObject, PostObject, AppendObject, DeleteObject, DeleteMultipleObjects,
+// CompleteMultipartUpload and CopyObject on the target object.
+//
+// objectKey the target object key (to set the ACL on)
+// objectAcl object ACL. Valid options are PrivateACL, PublicReadACL, PublicReadWriteACL.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType, options ...Option) error {
+	options = append(options, ObjectACL(objectACL))
+	params, _ := GetRawParams(options)
+	params["acl"] = nil
+	resp, err := bucket.do("PUT", objectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetObjectACL gets the object's ACL.
+//
+// objectKey the object to get the ACL from.
+//
+// GetObjectACLResult the result object when error is nil. GetObjectACLResult.Acl is the object ACL.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObjectACL(objectKey string, options ...Option) (GetObjectACLResult, error) {
+	var out GetObjectACLResult
+	params, _ := GetRawParams(options)
+	params["acl"] = nil
+	resp, err := bucket.do("GET", objectKey, params, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// PutSymlink creates a symlink (to point to an existing object).
+//
+// A symlink cannot point to another symlink.
+// When creating a symlink, OSS does not check that the target file exists, does not check whether the target is itself a symlink,
+// and does not check the caller's permission on the target file. All these checks are deferred to the actual GetObject call via this symlink.
+// If the symlink key already exists and the caller has write permission, the existing object is overwritten.
+// If x-oss-meta-* headers are specified, they are added as the metadata of the symlink file.
+//
+// symObjectKey the symlink object's key.
+// targetObjectKey the target object key to point to.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, options ...Option) error {
+	options = append(options, symlinkTarget(url.QueryEscape(targetObjectKey)))
+	params, _ := GetRawParams(options)
+	params["symlink"] = nil
+	resp, err := bucket.do("PUT", symObjectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetSymlink gets the symlink object with the specified key.
+// If the symlink object does not exist, a 404 is returned.
+//
+// objectKey the symlink object's key.
+//
+// error it's nil if no error, otherwise it's an error object.
+// When error is nil, the target file key is in the X-Oss-Symlink-Target header of the returned object.
+//
+func (bucket Bucket) GetSymlink(objectKey string, options ...Option) (http.Header, error) {
+	params, _ := GetRawParams(options)
+	params["symlink"] = nil
+	resp, err := bucket.do("GET", objectKey, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	targetObjectKey := resp.Headers.Get(HTTPHeaderOssSymlinkTarget)
+	targetObjectKey, err = url.QueryUnescape(targetObjectKey)
+	if err != nil {
+		return resp.Headers, err
+	}
+	resp.Headers.Set(HTTPHeaderOssSymlinkTarget, targetObjectKey)
+	return resp.Headers, err
+}
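+
+// Example symlink round-trip (an illustrative sketch, not part of the vendored
+// SDK; the keys are placeholders):
+//
+//	err := bucket.PutSymlink("my-symlink", "my-target-object")
+//	if err == nil {
+//		h, _ := bucket.GetSymlink("my-symlink")
+//		fmt.Println(h.Get(oss.HTTPHeaderOssSymlinkTarget)) // "my-target-object"
+//	}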
+
+// RestoreObject restores the object from archive storage.
+//
+// An archive object is in cold status by default and cannot be accessed.
+// When restore is called on a cold object, it becomes available for access after some time.
+// If restore is called again while the object is being restored, the server does nothing for the additional calls but still returns success.
+// By default, the restored object is available for access for one day. After that it becomes unavailable again.
+// If RestoreObject is called again after the object has been restored, the access period is extended by one day, up to 7 days.
+//
+// objectKey object key to restore.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) RestoreObject(objectKey string, options ...Option) error {
+	params, _ := GetRawParams(options)
+	params["restore"] = nil
+	resp, err := bucket.do("POST", objectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+}
+
+// RestoreObjectDetail supports more features than RestoreObject
+func (bucket Bucket) RestoreObjectDetail(objectKey string, restoreConfig RestoreConfiguration, options ...Option) error {
+	if restoreConfig.Tier == "" {
+		// Expedited, Standard, Bulk
+		restoreConfig.Tier = string(RestoreStandard)
+	}
+
+	if restoreConfig.Days == 0 {
+		restoreConfig.Days = 1
+	}
+
+	bs, err := xml.Marshal(restoreConfig)
+	if err != nil {
+		return err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	options = append(options, ContentType(contentType))
+
+	params, _ := GetRawParams(options)
+	params["restore"] = nil
+
+	resp, err := bucket.do("POST", objectKey, params, options, buffer, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+}
+
+// RestoreObjectXML supports more features than RestoreObject
+func (bucket Bucket) RestoreObjectXML(objectKey, configXML string, options ...Option) error {
+	buffer := new(bytes.Buffer)
+	buffer.Write([]byte(configXML))
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	options = append(options, ContentType(contentType))
+
+	params, _ := GetRawParams(options)
+	params["restore"] = nil
+
+	resp, err := bucket.do("POST", objectKey, params, options, buffer, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+}
+
+// SignURL signs a URL. Users can access the object directly with this URL without holding the access key.
+//
+// objectKey the target object to sign.
+// method the HTTP method to allow with the signed URL.
+// expiredInSec the validity period of the signed URL, in seconds.
+//
+// string the signed URL, when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec int64, options ...Option) (string, error) {
+	if expiredInSec < 0 {
+		return "", fmt.Errorf("invalid expires: %d, expires must be non-negative", expiredInSec)
+	}
+	expiration := time.Now().Unix() + expiredInSec
+
+	params, err := GetRawParams(options)
+	if err != nil {
+		return "", err
+	}
+
+	headers := make(map[string]string)
+	err = handleOptions(headers, options)
+	if err != nil {
+		return "", err
+	}
+
+	return bucket.Client.Conn.signURL(method, bucket.BucketName, objectKey, expiration, params, headers), nil
+}
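+
+// Example of signing and using a URL (an illustrative sketch, not part of the
+// vendored SDK; the key and expiry are placeholders, and oss.HTTPPut is this
+// SDK's HTTPMethod constant for PUT):
+//
+//	// URL valid for 60 seconds, for a PUT upload.
+//	signedURL, err := bucket.SignURL("my-object", oss.HTTPPut, 60)
+//	if err == nil {
+//		err = bucket.PutObjectWithURL(signedURL, strings.NewReader("hello"))
+//	}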
+
+// PutObjectWithURL uploads an object with a signed URL. If the object exists, it will be overwritten.
+// It does not infer the MIME type from the key name.
+//
+// signedURL signed URL.
+// reader io.Reader the read instance for reading the data for the upload.
+// options the options for uploading the data. The valid options are CacheControl, ContentDisposition, ContentEncoding,
+// Expires, ServerSideEncryption, ObjectACL and custom metadata. Check out the following link for details:
+// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, options ...Option) error {
+	resp, err := bucket.DoPutObjectWithURL(signedURL, reader, options)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+// PutObjectFromFileWithURL uploads an object from a local file with the signed URL.
+// It does not infer the MIME type from the object key or the local file name.
+//
+// signedURL the signed URL.
+// filePath the local file path to upload, such as dir/file.txt.
+// options options for uploading, same as the options in the PutObject function.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, options ...Option) error {
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	resp, err := bucket.DoPutObjectWithURL(signedURL, fd, options)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+// DoPutObjectWithURL is the actual API that does the upload via the signed URL (internal to the SDK).
+//
+// signedURL the signed URL.
+// reader io.Reader the read instance for getting the data to upload.
+// options options for uploading.
+//
+// Response the response object which contains the HTTP response.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, options []Option) (*Response, error) {
+	listener := GetProgressListener(options)
+
+	params := map[string]interface{}{}
+	resp, err := bucket.doURL("PUT", signedURL, params, options, reader, listener)
+	if err != nil {
+		return nil, err
+	}
+
+	if bucket.GetConfig().IsEnableCRC {
+		err = CheckCRC(resp, "DoPutObjectWithURL")
+		if err != nil {
+			return resp, err
+		}
+	}
+
+	err = CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+
+	return resp, err
+}
+
+// GetObjectWithURL downloads the object and returns the reader instance, with the signed URL.
+//
+// signedURL the signed URL.
+// options options for downloading the object. Valid options are IfModifiedSince, IfUnmodifiedSince, IfMatch,
+// IfNoneMatch, AcceptEncoding. For more information, check out the following link:
+// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
+//
+// io.ReadCloser the reader object for getting the data from the response. It must be closed after use. It's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.ReadCloser, error) {
+	result, err := bucket.DoGetObjectWithURL(signedURL, options)
+	if err != nil {
+		return nil, err
+	}
+	return result.Response, nil
+}
+
+// GetObjectToFileWithURL downloads the object into a local file with the signed URL.
+//
+// signedURL the signed URL
+// filePath the local file path to download to.
+// options the options for downloading the object; see the options of GetObject for reference.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options ...Option) error {
+	tempFilePath := filePath + TempFileSuffix
+
+	// Get the object's content
+	result, err := bucket.DoGetObjectWithURL(signedURL, options)
+	if err != nil {
+		return err
+	}
+	defer result.Response.Close()
+
+	// If the file does not exist, create one. If it exists, overwrite it.
+	fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
+	if err != nil {
+		return err
+	}
+
+	// Save the data to the file.
+	_, err = io.Copy(fd, result.Response.Body)
+	fd.Close()
+	if err != nil {
+		return err
+	}
+
+	// Compare the CRC values. If they do not match, return an error.
+	hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange)
+	encodeOpt, _ := FindOption(options, HTTPHeaderAcceptEncoding, nil)
+	acceptEncoding := ""
+	if encodeOpt != nil {
+		acceptEncoding = encodeOpt.(string)
+	}
+
+	if bucket.GetConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
+		result.Response.ClientCRC = result.ClientCRC.Sum64()
+		err = CheckCRC(result.Response, "GetObjectToFileWithURL")
+		if err != nil {
+			os.Remove(tempFilePath)
+			return err
+		}
+	}
+
+	return os.Rename(tempFilePath, filePath)
+}
+
+// DoGetObjectWithURL is the actual API that downloads the file with the signed URL.
+//
+// signedURL the signed URL.
+// options the options for getting the object; see the options of GetObject for reference.
+//
+// GetObjectResult the result object when the error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) {
+	params, _ := GetRawParams(options)
+	resp, err := bucket.doURL("GET", signedURL, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &GetObjectResult{
+		Response: resp,
+	}
+
+	// CRC
+	var crcCalc hash.Hash64
+	hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange)
+	if bucket.GetConfig().IsEnableCRC && !hasRange {
+		crcCalc = crc64.New(CrcTable())
+		result.ServerCRC = resp.ServerCRC
+		result.ClientCRC = crcCalc
+	}
+
+	// Progress
+	listener := GetProgressListener(options)
+
+	contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
+	resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil)
+
+	return result, nil
+}
+
+// ProcessObject applies a process to the specified image object.
+//
+// The supported processes include resize, rotate, crop, watermark, format,
+// udf, customized style, etc.
+//
+// objectKey object key to process.
+// process the process string, such as "image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA"
+//
+// error it's nil if no error, otherwise it's an error object.
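+//
+// Example (an illustrative sketch, not part of the vendored SDK; the key is a
+// placeholder, the process string is the one from the comment above):
+//
+//	res, err := bucket.ProcessObject("photo.jpg",
+//		"image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA")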
+//
+func (bucket Bucket) ProcessObject(objectKey string, process string, options ...Option) (ProcessObjectResult, error) {
+	var out ProcessObjectResult
+	params, _ := GetRawParams(options)
+	params["x-oss-process"] = nil
+	processData := fmt.Sprintf("%v=%v", "x-oss-process", process)
+	data := strings.NewReader(processData)
+	resp, err := bucket.do("POST", objectKey, params, nil, data, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = jsonUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// PutObjectTagging adds tagging to the object.
+//
+// objectKey object key to add tagging to
+// tagging tagging to be added
+//
+// error nil if success, otherwise error
+//
+func (bucket Bucket) PutObjectTagging(objectKey string, tagging Tagging, options ...Option) error {
+	bs, err := xml.Marshal(tagging)
+	if err != nil {
+		return err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	params, _ := GetRawParams(options)
+	params["tagging"] = nil
+	resp, err := bucket.do("PUT", objectKey, params, options, buffer, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return nil
+}
+
+// GetObjectTagging gets the tagging of the object.
+//
+// objectKey object key to get tagging for
+//
+// GetObjectTaggingResult the object's tagging
+// error nil if success, otherwise error
+//
+func (bucket Bucket) GetObjectTagging(objectKey string, options ...Option) (GetObjectTaggingResult, error) {
+	var out GetObjectTaggingResult
+	params, _ := GetRawParams(options)
+	params["tagging"] = nil
+
+	resp, err := bucket.do("GET", objectKey, params, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// DeleteObjectTagging deletes the object's tagging.
+//
+// objectKey object key to delete tagging from
+//
+// error nil if success, otherwise error
+//
+func (bucket Bucket) DeleteObjectTagging(objectKey string, options ...Option) error {
+	params, _ := GetRawParams(options)
+	params["tagging"] = nil
+
+	if objectKey == "" {
+		return fmt.Errorf("invalid argument: object name is empty")
+	}
+
+	resp, err := bucket.do("DELETE", objectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// OptionsMethod sends an HTTP OPTIONS (CORS preflight) request for the object and returns the response headers.
+func (bucket Bucket) OptionsMethod(objectKey string, options ...Option) (http.Header, error) {
+	var out http.Header
+	resp, err := bucket.do("OPTIONS", objectKey, nil, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+	out = resp.Headers
+	return out, nil
+}
+
+// Do is the public entry point that forwards to the private do method.
+func (bucket Bucket) Do(method, objectName string, params map[string]interface{}, options []Option,
+	data io.Reader, listener ProgressListener) (*Response, error) {
+	return bucket.do(method, objectName, params, options, data, listener)
+}
+
+// do builds headers from options, validates the bucket name and sends the request.
+func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option,
+	data io.Reader, listener ProgressListener) (*Response, error) {
+	headers := make(map[string]string)
+	err := handleOptions(headers, options)
+	if err != nil {
+		return nil, err
+	}
+
+	err = CheckBucketName(bucket.BucketName)
+	if len(bucket.BucketName) > 0 && err != nil {
+		return nil, err
+	}
+
+	resp, err := bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
+		params, headers, data, 0, listener)
+
+	// get response header
+	respHeader, _ := FindOption(options, responseHeader, nil)
+	if respHeader != nil && resp != nil {
+		pRespHeader := respHeader.(*http.Header)
+		*pRespHeader = resp.Headers
+	}
+
+	return resp, err
+}
+
+func (bucket Bucket) doURL(method HTTPMethod, signedURL string, params map[string]interface{}, options []Option,
+	data io.Reader, listener ProgressListener) (*Response, error) {
+	headers := make(map[string]string)
+	err := handleOptions(headers, options)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := bucket.Client.Conn.DoURL(method, signedURL, headers, data, 0, listener)
+
+	// get response header; guard against a nil response on error, as do does
+	respHeader, _ := FindOption(options, responseHeader, nil)
+	if respHeader != nil && resp != nil {
+		pRespHeader := respHeader.(*http.Header)
+		*pRespHeader = resp.Headers
+	}
+
+	return resp, err
+}
+
+func (bucket Bucket) GetConfig() *Config {
+	return bucket.Client.Config
+}
+
+func AddContentType(options []Option, keys ...string) []Option {
+	typ := TypeByExtension("")
+	for _, key := range keys {
+		typ = TypeByExtension(key)
+		if typ != "" {
+			break
+		}
+	}
+
+	if typ == "" {
+		typ = "application/octet-stream"
+	}
+
+	opts := []Option{ContentType(typ)}
+	opts = append(opts, options...)
+
+	return opts
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
new file mode 100644
index 0000000000..d725a40ebc
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
@@ -0,0 +1,2056 @@
+// Package oss implements functions for accessing the OSS service.
+// It has two main structs, Client and Bucket.
+package oss
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+)
+
+// Client is the SDK's entry point. It handles bucket-level operations such as creating/deleting buckets
+// and getting/setting bucket properties (ACL/lifecycle/referer/logging/website).
+// Object-level operations are done through the Bucket struct.
+// Use oss.New to create a Client instance.
+//
+type (
+	// Client OSS client
+	Client struct {
+		Config     *Config      // OSS client configuration
+		Conn       *Conn        // Sends HTTP requests
+		HTTPClient *http.Client // http.Client to use; if nil, the SDK creates its own
+	}
+
+	// ClientOption client option such as UseCname, Timeout, SecurityToken.
+	ClientOption func(*Client)
+)
+
+// New creates a new client.
+//
+// endpoint the OSS datacenter endpoint such as http://oss-cn-hangzhou.aliyuncs.com.
+// accessKeyID the access key ID.
+// accessKeySecret the access key secret.
+//
+// Client the new client instance; the returned value is valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) {
+	// Configuration
+	config := getDefaultOssConfig()
+	config.Endpoint = endpoint
+	config.AccessKeyID = accessKeyID
+	config.AccessKeySecret = accessKeySecret
+
+	// URL parse
+	url := &urlMaker{}
+	err := url.Init(config.Endpoint, config.IsCname, config.IsUseProxy)
+	if err != nil {
+		return nil, err
+	}
+
+	// HTTP connect
+	conn := &Conn{config: config, url: url}
+
+	// OSS client
+	client := &Client{
+		Config: config,
+		Conn:   conn,
+	}
+
+	// Client options parse
+	for _, option := range options {
+		option(client)
+	}
+
+	if config.AuthVersion != AuthV1 && config.AuthVersion != AuthV2 {
+		return nil, fmt.Errorf("init client error, invalid auth version: %v", config.AuthVersion)
+	}
+
+	// Create HTTP connection
+	err = conn.init(config, url, client.HTTPClient)
+
+	return client, err
+}
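+
+// Example (an illustrative sketch, not part of the vendored SDK; the endpoint
+// and credentials are placeholders):
+//
+//	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com",
+//		"<accessKeyID>", "<accessKeySecret>")
+//	if err != nil {
+//		// handle error
+//	}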
+
+// Bucket gets the bucket instance.
+//
+// bucketName the bucket name.
+// Bucket the bucket object, when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) Bucket(bucketName string) (*Bucket, error) {
+	err := CheckBucketName(bucketName)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Bucket{
+		client,
+		bucketName,
+	}, nil
+}
+
+// CreateBucket creates a bucket.
+//
+// bucketName the bucket name, which is globally unique and immutable. The bucket name can only consist of lowercase letters, numbers and dashes ('-').
+// It must start with a lowercase letter or number, and its length must be between 3 and 63.
+// options options for creating the bucket, with optional ACL. The ACL can be ACLPrivate, ACLPublicRead, and ACLPublicReadWrite. By default it's ACLPrivate.
+// The StorageClass option can also be specified, which supports StorageStandard, StorageIA (infrequent access) and StorageArchive.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) CreateBucket(bucketName string, options ...Option) error {
+	headers := make(map[string]string)
+	handleOptions(headers, options)
+
+	buffer := new(bytes.Buffer)
+
+	var cbConfig createBucketConfiguration
+	cbConfig.StorageClass = StorageStandard
+
+	isStorageSet, valStorage, _ := IsOptionSet(options, storageClass)
+	isRedundancySet, valRedundancy, _ := IsOptionSet(options, redundancyType)
+	isObjectHashFuncSet, valHashFunc, _ := IsOptionSet(options, objectHashFunc)
+	if isStorageSet {
+		cbConfig.StorageClass = valStorage.(StorageClassType)
+	}
+
+	if isRedundancySet {
+		cbConfig.DataRedundancyType = valRedundancy.(DataRedundancyType)
+	}
+
+	if isObjectHashFuncSet {
+		cbConfig.ObjectHashFunction = valHashFunc.(ObjecthashFuncType)
+	}
+
+	bs, err := xml.Marshal(cbConfig)
+	if err != nil {
+		return err
+	}
+	buffer.Write(bs)
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// CreateBucketXml creates a bucket from the given XML configuration body.
+func (client Client) CreateBucketXml(bucketName string, xmlBody string, options ...Option) error {
+	buffer := new(bytes.Buffer)
+	buffer.Write([]byte(xmlBody))
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// ListBuckets lists buckets of the current account under the given endpoint, with optional filters.
+//
+// options specifies the filters such as Prefix, Marker and MaxKeys. Prefix is the bucket name's prefix filter.
+// Marker ensures the returned buckets' names are greater than it in lexicographic order.
+// MaxKeys limits the number of buckets to return; by default it's 100, up to 1000.
+// For the common usage scenario, please check out list_bucket.go in the sample.
+// ListBucketsResult the result object; it's only valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
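+//
+// Example paging through buckets (an illustrative sketch, not part of the
+// vendored SDK; the prefix and page size are placeholders, and the result is
+// assumed to carry IsTruncated and NextMarker fields):
+//
+//	marker := ""
+//	for {
+//		res, err := client.ListBuckets(oss.Prefix("my-"), oss.Marker(marker), oss.MaxKeys(10))
+//		if err != nil {
+//			break
+//		}
+//		for _, b := range res.Buckets {
+//			fmt.Println(b.Name)
+//		}
+//		if !res.IsTruncated {
+//			break
+//		}
+//		marker = res.NextMarker
+//	}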
+//
+func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
+	var out ListBucketsResult
+
+	params, err := GetRawParams(options)
+	if err != nil {
+		return out, err
+	}
+
+	resp, err := client.do("GET", "", params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// IsBucketExist checks if the bucket exists.
+//
+// bucketName the bucket name.
+//
+// bool true if it exists; only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) IsBucketExist(bucketName string) (bool, error) {
+	listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1))
+	if err != nil {
+		return false, err
+	}
+
+	if len(listRes.Buckets) == 1 && listRes.Buckets[0].Name == bucketName {
+		return true, nil
+	}
+	return false, nil
+}
+
+// DeleteBucket deletes the bucket. Only an empty bucket can be deleted (no objects or uncompleted multipart parts).
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucket(bucketName string, options ...Option) error {
+	params := map[string]interface{}{}
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// GetBucketLocation gets the bucket location.
+//
+// Check out the following link for more information:
+// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html
+//
+// bucketName the bucket name
+//
+// string the bucket's datacenter location
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketLocation(bucketName string, options ...Option) (string, error) {
+	params := map[string]interface{}{}
+	params["location"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	var LocationConstraint string
+	err = xmlUnmarshal(resp.Body, &LocationConstraint)
+	return LocationConstraint, err
+}
+
+// SetBucketACL sets the bucket's ACL.
+//
+// bucketName the bucket name
+// bucketAcl the bucket ACL: ACLPrivate, ACLPublicRead and ACLPublicReadWrite.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketACL(bucketName string, bucketACL ACLType, options ...Option) error {
+	headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)}
+	params := map[string]interface{}{}
+	params["acl"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, nil, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketACL gets the bucket ACL.
+//
+// bucketName the bucket name.
+//
+// GetBucketACLResult the result object; it's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketACL(bucketName string, options ...Option) (GetBucketACLResult, error) {
+	var out GetBucketACLResult
+	params := map[string]interface{}{}
+	params["acl"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
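+
+// Example ACL round-trip for the two calls above (an illustrative sketch, not
+// part of the vendored SDK; the bucket name and the result's ACL field are
+// assumptions):
+//
+//	if err := client.SetBucketACL("my-bucket", oss.ACLPublicRead); err == nil {
+//		res, _ := client.GetBucketACL("my-bucket")
+//		fmt.Println(res.ACL) // "public-read"
+//	}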
+
+// SetBucketLifecycle sets the bucket's lifecycle.
+//
+// For more information, check out the following link:
+// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html
+//
+// bucketName the bucket name.
+// rules the lifecycle rules. There are two kinds of rules: absolute-time expiration (a date) and relative-time expiration (a number of days).
+// Check out sample/bucket_lifecycle.go for more details.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule, options ...Option) error {
+	err := verifyLifecycleRules(rules)
+	if err != nil {
+		return err
+	}
+	lifecycleCfg := LifecycleConfiguration{Rules: rules}
+	bs, err := xml.Marshal(lifecycleCfg)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["lifecycle"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// SetBucketLifecycleXml sets the bucket's lifecycle rules from an XML configuration body.
+func (client Client) SetBucketLifecycleXml(bucketName string, xmlBody string, options ...Option) error {
+	buffer := new(bytes.Buffer)
+	buffer.Write([]byte(xmlBody))
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["lifecycle"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// DeleteBucketLifecycle deletes the bucket's lifecycle.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketLifecycle(bucketName string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["lifecycle"] = nil
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
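+
+// Example of setting a simple expiration rule (an illustrative sketch, not
+// part of the vendored SDK; it assumes the BuildLifecycleRuleByDays helper
+// from this SDK's type.go, and the names are placeholders):
+//
+//	rule := oss.BuildLifecycleRuleByDays("expire-logs", "logs/", true, 30)
+//	err := client.SetBucketLifecycle("my-bucket", []oss.LifecycleRule{rule})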
+
+// GetBucketLifecycle gets the bucket's lifecycle settings.
+//
+// bucketName the bucket name.
+//
+// GetBucketLifecycleResult the result object upon successful request. It's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketLifecycle(bucketName string, options ...Option) (GetBucketLifecycleResult, error) {
+	var out GetBucketLifecycleResult
+	params := map[string]interface{}{}
+	params["lifecycle"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+
+	// NonVersionTransition is deprecated; it is kept populated
+	// for backward compatibility
+	for k, rule := range out.Rules {
+		if len(rule.NonVersionTransitions) > 0 {
+			out.Rules[k].NonVersionTransition = &(out.Rules[k].NonVersionTransitions[0])
+		}
+	}
+	return out, err
+}
+
+func (client Client) GetBucketLifecycleXml(bucketName string, options ...Option) (string, error) {
+	params := map[string]interface{}{}
+	params["lifecycle"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	out := string(body)
+	return out, err
+}
+
+// SetBucketReferer sets the bucket's referer whitelist and the flag that allows empty referers.
+//
+// To prevent hot-linking of OSS data, OSS supports the HTTP Referer header. A referer whitelist can be set either by API or in the web console, as well as
+// the flag that allows empty referers. Note that this applies to requests from web browsers only.
+// For example, for a bucket os-example with the referer http://www.aliyun.com, all requests from that URL can access the bucket.
+// For more information, please check out this link:
+// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html
+//
+// bucketName the bucket name.
+// referers the referer whitelist. Each entry supports one '*' and multiple '?' wildcards.
+// A sample can be found in sample/bucket_referer.go
+// allowEmptyReferer whether to allow requests with an empty referer. By default it's true.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool, options ...Option) error {
+	rxml := RefererXML{}
+	rxml.AllowEmptyReferer = allowEmptyReferer
+	if referers == nil {
+		rxml.RefererList = append(rxml.RefererList, "")
+	} else {
+		for _, referer := range referers {
+			rxml.RefererList = append(rxml.RefererList, referer)
+		}
+	}
+
+	bs, err := xml.Marshal(rxml)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["referer"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketReferer gets the bucket's referer whitelist.
+//
+// bucketName the bucket name.
+//
+// GetBucketRefererResult the result object upon successful request. It's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketReferer(bucketName string, options ...Option) (GetBucketRefererResult, error) {
+	var out GetBucketRefererResult
+	params := map[string]interface{}{}
+	params["referer"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
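+
+// Example referer whitelist (an illustrative sketch, not part of the vendored
+// SDK; the names are placeholders):
+//
+//	referers := []string{"http://www.aliyun.com", "https://*.example.com"}
+//	err := client.SetBucketReferer("my-bucket", referers, false)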
+
+// SetBucketLogging sets the bucket logging settings.
+//
+// OSS can automatically store access logs. Only the bucket owner can enable logging.
+// Once enabled, OSS saves the access logs into hourly log files in the specified target bucket.
+// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
+//
+// bucketName the bucket name to enable logging on.
+// targetBucket the target bucket name to store the log files.
+// targetPrefix the log files' prefix.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string,
+	isEnable bool, options ...Option) error {
+	var err error
+	var bs []byte
+	if isEnable {
+		lxml := LoggingXML{}
+		lxml.LoggingEnabled.TargetBucket = targetBucket
+		lxml.LoggingEnabled.TargetPrefix = targetPrefix
+		bs, err = xml.Marshal(lxml)
+	} else {
+		lxml := loggingXMLEmpty{}
+		bs, err = xml.Marshal(lxml)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["logging"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// DeleteBucketLogging deletes the logging configuration to disable logging on the bucket.
+//
+// bucketName the bucket name to disable logging on.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketLogging(bucketName string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["logging"] = nil
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// GetBucketLogging gets the bucket's logging settings.
+//
+// bucketName the bucket name
+// GetBucketLoggingResult the result object upon successful request. It's only valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketLogging(bucketName string, options ...Option) (GetBucketLoggingResult, error) {
+	var out GetBucketLoggingResult
+	params := map[string]interface{}{}
+	params["logging"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// SetBucketWebsite sets the bucket's static website's index and error pages.
+//
+// OSS supports static website hosting for bucket data. When enabled, files in the bucket can be accessed the same way as a static website.
+// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
+//
+// bucketName the bucket name to enable static website hosting on.
+// indexDocument index page.
+// errorDocument error page.
+//
+// error it's nil if no error, otherwise it's an error object.
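+//
+// Example (an illustrative sketch, not part of the vendored SDK; the names
+// are placeholders):
+//
+//	err := client.SetBucketWebsite("my-bucket", "index.html", "error.html")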
+//
+func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string, options ...Option) error {
+	wxml := WebsiteXML{}
+	wxml.IndexDocument.Suffix = indexDocument
+	wxml.ErrorDocument.Key = errorDocument
+
+	bs, err := xml.Marshal(wxml)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := make(map[string]string)
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["website"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// SetBucketWebsiteDetail sets the bucket's full static website configuration.
+//
+// OSS supports static website hosting for bucket data. When enabled, files in the bucket can be accessed the same way as a static website.
+// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
+//
+// bucketName the bucket name to enable static website hosting on.
+//
+// wxml the website configuration details
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketWebsiteDetail(bucketName string, wxml WebsiteXML, options ...Option) error {
+	bs, err := xml.Marshal(wxml)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := make(map[string]string)
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["website"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// SetBucketWebsiteXml sets the bucket's static website configuration from an XML body.
+//
+// OSS supports static website hosting for bucket data. When enabled, files in the bucket can be accessed the same way as a static website.
+// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
+//
+// bucketName the bucket name to enable static website hosting on.
+//
+// webXml the website configuration XML
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketWebsiteXml(bucketName string, webXml string, options ...Option) error {
+	buffer := new(bytes.Buffer)
+	buffer.Write([]byte(webXml))
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := make(map[string]string)
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["website"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// DeleteBucketWebsite deletes the bucket's static website settings.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketWebsite(bucketName string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["website"] = nil
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// GetBucketWebsite gets the bucket's default page (index page) and the error page.
+//
+// bucketName the bucket name
+//
+// GetBucketWebsiteResult the result object upon successful request. It's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketWebsite(bucketName string, options ...Option) (GetBucketWebsiteResult, error) {
+	var out GetBucketWebsiteResult
+	params := map[string]interface{}{}
+	params["website"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// GetBucketWebsiteXml gets the bucket's website configuration as raw XML.
+//
+// bucketName the bucket name
+//
+// string the bucket's XML configuration; it's only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketWebsiteXml(bucketName string, options ...Option) (string, error) {
+	params := map[string]interface{}{}
+	params["website"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+
+	out := string(body)
+	return out, err
+}
+
+// SetBucketCORS sets the bucket's CORS rules.
+//
+// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
+//
+// bucketName the bucket name
+// corsRules the CORS rules to set. The related sample code is in sample/bucket_cors.go.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule, options ...Option) error {
+	corsxml := CORSXML{}
+	for _, v := range corsRules {
+		cr := CORSRule{}
+		cr.AllowedMethod = v.AllowedMethod
+		cr.AllowedOrigin = v.AllowedOrigin
+		cr.AllowedHeader = v.AllowedHeader
+		cr.ExposeHeader = v.ExposeHeader
+		cr.MaxAgeSeconds = v.MaxAgeSeconds
+		corsxml.CORSRules = append(corsxml.CORSRules, cr)
+	}
+
+	bs, err := xml.Marshal(corsxml)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["cors"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+func (client Client) SetBucketCORSXml(bucketName string, xmlBody string, options ...Option) error {
+	buffer := new(bytes.Buffer)
+	buffer.Write([]byte(xmlBody))
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["cors"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// DeleteBucketCORS deletes the bucket's CORS settings.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
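+//
+// Example set-then-delete cycle for the CORS calls above (an illustrative
+// sketch, not part of the vendored SDK; the values are placeholders):
+//
+//	rule := oss.CORSRule{
+//		AllowedOrigin: []string{"*"},
+//		AllowedMethod: []string{"GET", "HEAD"},
+//		MaxAgeSeconds: 300,
+//	}
+//	if err := client.SetBucketCORS("my-bucket", []oss.CORSRule{rule}); err == nil {
+//		_ = client.DeleteBucketCORS("my-bucket")
+//	}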
+//
+func (client Client) DeleteBucketCORS(bucketName string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["cors"] = nil
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// GetBucketCORS gets the bucket's CORS settings.
+//
+// bucketName the bucket name.
+// GetBucketCORSResult the result object upon successful request. It's only valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketCORS(bucketName string, options ...Option) (GetBucketCORSResult, error) {
+	var out GetBucketCORSResult
+	params := map[string]interface{}{}
+	params["cors"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+func (client Client) GetBucketCORSXml(bucketName string, options ...Option) (string, error) {
+	params := map[string]interface{}{}
+	params["cors"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	out := string(body)
+	return out, err
+}
+
+// GetBucketInfo gets the bucket information.
+//
+// bucketName the bucket name.
+// GetBucketInfoResult the result object upon successful request. It's only valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketInfo(bucketName string, options ...Option) (GetBucketInfoResult, error) {
+	var out GetBucketInfoResult
+	params := map[string]interface{}{}
+	params["bucketInfo"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+
+	// convert "None" to ""
+	if err == nil {
+		if out.BucketInfo.SseRule.KMSMasterKeyID == "None" {
+			out.BucketInfo.SseRule.KMSMasterKeyID = ""
+		}
+
+		if out.BucketInfo.SseRule.SSEAlgorithm == "None" {
+			out.BucketInfo.SseRule.SSEAlgorithm = ""
+		}
+
+		if out.BucketInfo.SseRule.KMSDataEncryption == "None" {
+			out.BucketInfo.SseRule.KMSDataEncryption = ""
+		}
+	}
+	return out, err
+}
+
+// SetBucketVersioning sets the bucket's versioning status: Enabled or Suspended.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) SetBucketVersioning(bucketName string, versioningConfig VersioningConfig, options ...Option) error {
+	var err error
+	var bs []byte
+	bs, err = xml.Marshal(versioningConfig)
+
+	if err != nil {
+		return err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["versioning"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketVersioning gets the bucket's versioning status: Enabled or Suspended.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
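+//
+// Example enable-then-check (an illustrative sketch, not part of the vendored
+// SDK; it assumes VersioningConfig and the result carry a Status field as in
+// this SDK's type.go):
+//
+//	err := client.SetBucketVersioning("my-bucket", oss.VersioningConfig{Status: "Enabled"})
+//	if err == nil {
+//		res, _ := client.GetBucketVersioning("my-bucket")
+//		fmt.Println(res.Status) // "Enabled"
+//	}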
+func (client Client) GetBucketVersioning(bucketName string, options ...Option) (GetBucketVersioningResult, error) {
+	var out GetBucketVersioningResult
+	params := map[string]interface{}{}
+	params["versioning"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// SetBucketEncryption sets the bucket's encryption configuration.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) SetBucketEncryption(bucketName string, encryptionRule ServerEncryptionRule, options ...Option) error {
+	var err error
+	var bs []byte
+	bs, err = xml.Marshal(encryptionRule)
+
+	if err != nil {
+		return err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["encryption"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketEncryption gets the bucket's encryption configuration.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketEncryption(bucketName string, options ...Option) (GetBucketEncryptionResult, error) {
+	var out GetBucketEncryptionResult
+	params := map[string]interface{}{}
+	params["encryption"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// DeleteBucketEncryption deletes the bucket's encryption configuration.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) DeleteBucketEncryption(bucketName string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["encryption"] = nil
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// SetBucketTagging adds tagging to the bucket.
+// bucketName name of the bucket
+// tagging tagging to be added
+// error nil if success, otherwise error
+func (client Client) SetBucketTagging(bucketName string, tagging Tagging, options ...Option) error {
+	var err error
+	var bs []byte
+	bs, err = xml.Marshal(tagging)
+
+	if err != nil {
+		return err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["tagging"] = nil
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketTagging gets the bucket's tagging.
+// bucketName name of the bucket
+// error nil if success, otherwise error
+func (client Client) GetBucketTagging(bucketName string, options ...Option) (GetBucketTaggingResult, error) {
+	var out GetBucketTaggingResult
+	params := map[string]interface{}{}
+	params["tagging"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// DeleteBucketTagging deletes the bucket's tagging.
+// bucketName name of the bucket
+// error nil if success, otherwise error
+//
+func (client Client) DeleteBucketTagging(bucketName string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["tagging"] = nil
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// GetBucketStat gets the bucket's statistics.
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+func (client Client) GetBucketStat(bucketName string, options ...Option) (GetBucketStatResult, error) {
+	var out GetBucketStatResult
+	params := map[string]interface{}{}
+	params["stat"] = nil
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// GetBucketPolicy API operation for Object Storage Service.
+//
+// Gets the policy of the bucket.
+//
+// bucketName the bucket name.
+//
+// string the bucket's policy; it's only valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketPolicy(bucketName string, options ...Option) (string, error) {
+	params := map[string]interface{}{}
+	params["policy"] = nil
+
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+
+	out := string(body)
+	return out, err
+}
+
+// SetBucketPolicy API operation for Object Storage Service.
+//
+// Sets the policy on the bucket.
+//
+// bucketName the bucket name.
+//
+// policy the bucket policy.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketPolicy(bucketName string, policy string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["policy"] = nil
+
+	buffer := strings.NewReader(policy)
+
+	resp, err := client.do("PUT", bucketName, params, nil, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
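+
+// Example policy document (an illustrative sketch, not part of the vendored
+// SDK; the JSON is a minimal placeholder policy, not a recommended one):
+//
+//	policy := `{
+//	  "Version": "1",
+//	  "Statement": [{
+//	    "Action": ["oss:GetObject"],
+//	    "Effect": "Allow",
+//	    "Principal": ["*"],
+//	    "Resource": ["acs:oss:*:*:my-bucket/*"]
+//	  }]
+//	}`
+//	err := client.SetBucketPolicy("my-bucket", policy)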
+
+// DeleteBucketPolicy API operation for Object Storage Service.
+//
+// Deletes the policy from the bucket.
+//
+// bucketName the bucket name.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketPolicy(bucketName string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["policy"] = nil
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// SetBucketRequestPayment API operation for Object Storage Service.
+//
+// Sets the bucket's request payment configuration.
+//
+// bucketName the bucket name.
+//
+// paymentConfig the payment configuration
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketRequestPayment(bucketName string, paymentConfig RequestPaymentConfiguration, options ...Option) error {
+	params := map[string]interface{}{}
+	params["requestPayment"] = nil
+
+	var bs []byte
+	bs, err := xml.Marshal(paymentConfig)
+
+	if err != nil {
+		return err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketRequestPayment API operation for Object Storage Service.
+//
+// Gets the bucket's request payment configuration.
+//
+// bucketName the bucket name.
+//
+// RequestPaymentConfiguration the payment configuration
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketRequestPayment(bucketName string, options ...Option) (RequestPaymentConfiguration, error) {
+	var out RequestPaymentConfiguration
+	params := map[string]interface{}{}
+	params["requestPayment"] = nil
+
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// GetUserQoSInfo API operation for Object Storage Service.
+//
+// Gets the user's QoS configuration.
+//
+// UserQoSConfiguration the user QoS and range information.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetUserQoSInfo(options ...Option) (UserQoSConfiguration, error) {
+	var out UserQoSConfiguration
+	params := map[string]interface{}{}
+	params["qosInfo"] = nil
+
+	resp, err := client.do("GET", "", params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// SetBucketQoSInfo API operation for Object Storage Service.
+//
+// Sets the bucket's QoS configuration.
+//
+// bucketName the bucket name.
+//
+// qosConf the QoS configuration.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketQoSInfo(bucketName string, qosConf BucketQoSConfiguration, options ...Option) error {
+	params := map[string]interface{}{}
+	params["qosInfo"] = nil
+
+	var bs []byte
+	bs, err := xml.Marshal(qosConf)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketQosInfo API operation for Object Storage Service.
+//
+// Gets the bucket's QoS configuration.
+//
+// bucketName the bucket name.
+//
+// BucketQoSConfiguration the returned QoS configuration.
+// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketQosInfo(bucketName string, options ...Option) (BucketQoSConfiguration, error) { + var out BucketQoSConfiguration + params := map[string]interface{}{} + params["qosInfo"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// DeleteBucketQosInfo API operation for Object Storage Service. +// +// Delete Bucket QoS information. +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketQosInfo(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["qosInfo"] = nil + + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// SetBucketInventory API operation for Object Storage Service +// +// Set the Bucket inventory. +// +// bucketName tht bucket name. +// +// inventoryConfig the inventory configuration. +// +// error it's nil if no error, otherwise it's an error. +// +func (client Client) SetBucketInventory(bucketName string, inventoryConfig InventoryConfiguration, options ...Option) error { + params := map[string]interface{}{} + params["inventoryId"] = inventoryConfig.Id + params["inventory"] = nil + + var bs []byte + bs, err := xml.Marshal(inventoryConfig) + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := make(map[string]string) + headers[HTTPHeaderContentType] = contentType + + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + + if err != nil { + return err + } + + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketInventory API operation for Object Storage Service +// +// Get the Bucket inventory. +// +// bucketName tht bucket name. +// +// strInventoryId the inventory id. +// +// InventoryConfiguration the inventory configuration. +// +// error it's nil if no error, otherwise it's an error. +// +func (client Client) GetBucketInventory(bucketName string, strInventoryId string, options ...Option) (InventoryConfiguration, error) { + var out InventoryConfiguration + params := map[string]interface{}{} + params["inventory"] = nil + params["inventoryId"] = strInventoryId + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// ListBucketInventory API operation for Object Storage Service +// +// List the Bucket inventory. +// +// bucketName tht bucket name. +// +// continuationToken the users token. +// +// ListInventoryConfigurationsResult list all inventory configuration by . +// +// error it's nil if no error, otherwise it's an error. 
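+//
+// Illustrative pagination sketch (editor's note, not part of the vendored
+// SDK; the client value, the bucket name "my-bucket" and the nextToken
+// variable are assumed): the first call passes an empty token, and later
+// pages pass the continuation token reported in the previous result.
+//
+//	res, err := client.ListBucketInventory("my-bucket", "")
+//	// inspect res and, while the result reports more pages, call again
+//	// with the returned continuation token:
+//	// res, err = client.ListBucketInventory("my-bucket", nextToken)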
+//
+func (client Client) ListBucketInventory(bucketName, continuationToken string, options ...Option) (ListInventoryConfigurationsResult, error) {
+	var out ListInventoryConfigurationsResult
+	params := map[string]interface{}{}
+	params["inventory"] = nil
+	if continuationToken == "" {
+		params["continuation-token"] = nil
+	} else {
+		params["continuation-token"] = continuationToken
+	}
+
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// DeleteBucketInventory API operation for Object Storage Service.
+//
+// Delete bucket inventory information.
+//
+// bucketName the bucket name.
+//
+// strInventoryId the inventory id.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) DeleteBucketInventory(bucketName, strInventoryId string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["inventory"] = nil
+	params["inventoryId"] = strInventoryId
+
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// SetBucketAsyncTask API operation for setting an async fetch task
+//
+// bucketName the bucket name.
+//
+// asynConf the async fetch task configuration.
+//
+// error it's nil if success, otherwise it's an error.
+func (client Client) SetBucketAsyncTask(bucketName string, asynConf AsyncFetchTaskConfiguration, options ...Option) (AsyncFetchTaskResult, error) {
+	var out AsyncFetchTaskResult
+	params := map[string]interface{}{}
+	params["asyncFetch"] = nil
+
+	var bs []byte
+	bs, err := xml.Marshal(asynConf)
+
+	if err != nil {
+		return out, err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := make(map[string]string)
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+
+	if err != nil {
+		return out, err
+	}
+
+	defer resp.Body.Close()
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// GetBucketAsyncTask API operation for getting an async fetch task
+//
+// bucketName the bucket name.
+//
+// taskID the task id returned by SetBucketAsyncTask
+//
+// error it's nil if success, otherwise it's an error.
+func (client Client) GetBucketAsyncTask(bucketName string, taskID string, options ...Option) (AsynFetchTaskInfo, error) {
+	var out AsynFetchTaskInfo
+	params := map[string]interface{}{}
+	params["asyncFetch"] = nil
+
+	headers := make(map[string]string)
+	headers[HTTPHeaderOssTaskID] = taskID
+	resp, err := client.do("GET", bucketName, params, headers, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// InitiateBucketWorm creates the bucket worm configuration
+// bucketName the bucket name.
+// retentionDays the retention period in days
+// error it's nil if no error, otherwise it's an error object.
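+//
+// Illustrative WORM lifecycle (editor's sketch, not part of the vendored
+// SDK; the client value and the bucket name "my-bucket" are assumed):
+// initiate a 30-day retention policy, lock it in with the returned worm ID,
+// and extend it later via ExtendBucketWorm.
+//
+//	wormID, err := client.InitiateBucketWorm("my-bucket", 30)
+//	if err == nil {
+//		err = client.CompleteBucketWorm("my-bucket", wormID)
+//	}
+//	if err == nil {
+//		err = client.ExtendBucketWorm("my-bucket", 60, wormID)
+//	}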
+//
+func (client Client) InitiateBucketWorm(bucketName string, retentionDays int, options ...Option) (string, error) {
+	var initiateWormConf InitiateWormConfiguration
+	initiateWormConf.RetentionPeriodInDays = retentionDays
+
+	var respHeader http.Header
+	isOptSet, _, _ := IsOptionSet(options, responseHeader)
+	if !isOptSet {
+		options = append(options, GetResponseHeader(&respHeader))
+	}
+
+	bs, err := xml.Marshal(initiateWormConf)
+	if err != nil {
+		return "", err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := make(map[string]string)
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["worm"] = nil
+
+	resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	respOpt, _ := FindOption(options, responseHeader, nil)
+	wormID := ""
+	err = CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+	if err == nil && respOpt != nil {
+		wormID = (respOpt.(*http.Header)).Get("x-oss-worm-id")
+	}
+	return wormID, err
+}
+
+// AbortBucketWorm deletes the bucket worm configuration
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) AbortBucketWorm(bucketName string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["worm"] = nil
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// CompleteBucketWorm completes the bucket worm configuration
+// bucketName the bucket name.
+// wormID the worm id
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) CompleteBucketWorm(bucketName string, wormID string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["wormId"] = wormID
+	resp, err := client.do("POST", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// ExtendBucketWorm extends the bucket worm configuration
+// bucketName the bucket name.
+// retentionDays the retention period in days
+// wormID the worm id
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) ExtendBucketWorm(bucketName string, retentionDays int, wormID string, options ...Option) error {
+	var extendWormConf ExtendWormConfiguration
+	extendWormConf.RetentionPeriodInDays = retentionDays
+
+	bs, err := xml.Marshal(extendWormConf)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := make(map[string]string)
+	headers[HTTPHeaderContentType] = contentType
+
+	params := map[string]interface{}{}
+	params["wormId"] = wormID
+	params["wormExtend"] = nil
+
+	resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketWorm gets the bucket worm configuration
+// bucketName the bucket name.
+// error it's nil if no error, otherwise it's an error object.
+// +func (client Client) GetBucketWorm(bucketName string, options ...Option) (WormConfiguration, error) { + var out WormConfiguration + params := map[string]interface{}{} + params["worm"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketTransferAcc set bucket transfer acceleration configuration +// bucketName the bucket name. +// accConf bucket transfer acceleration configuration +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketTransferAcc(bucketName string, accConf TransferAccConfiguration, options ...Option) error { + bs, err := xml.Marshal(accConf) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := make(map[string]string) + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["transferAcceleration"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketTransferAcc get bucket transfer acceleration configuration +// bucketName the bucket name. +// accConf bucket transfer acceleration configuration +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketTransferAcc(bucketName string, options ...Option) (TransferAccConfiguration, error) { + var out TransferAccConfiguration + params := map[string]interface{}{} + params["transferAcceleration"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// DeleteBucketTransferAcc delete bucket transfer acceleration configuration +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketTransferAcc(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["transferAcceleration"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// PutBucketReplication put bucket replication configuration +// bucketName the bucket name. +// xmlBody the replication configuration. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) PutBucketReplication(bucketName string, xmlBody string, options ...Option) error { + buffer := new(bytes.Buffer) + buffer.Write([]byte(xmlBody)) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["replication"] = nil + params["comp"] = "add" + resp, err := client.do("POST", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketReplication get bucket replication configuration +// bucketName the bucket name. +// string the replication configuration. +// error it's nil if no error, otherwise it's an error object. 
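+//
+// Illustrative usage (editor's sketch, not part of the vendored SDK; the
+// client value, bucket name and rule body are assumed): PutBucketReplication
+// takes the raw replication XML, and GetBucketReplication returns the stored
+// configuration verbatim.
+//
+//	xmlBody := "<ReplicationConfiguration><Rule>...</Rule></ReplicationConfiguration>"
+//	if err := client.PutBucketReplication("my-bucket", xmlBody); err == nil {
+//		conf, _ := client.GetBucketReplication("my-bucket")
+//		fmt.Println(conf)
+//	}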
+// +func (client Client) GetBucketReplication(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["replication"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(data), err +} + +// DeleteBucketReplication delete bucket replication configuration +// bucketName the bucket name. +// ruleId the ID of the replication configuration. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketReplication(bucketName string, ruleId string, options ...Option) error { + replicationxml := ReplicationXML{} + replicationxml.ID = ruleId + + bs, err := xml.Marshal(replicationxml) + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["replication"] = nil + params["comp"] = "delete" + resp, err := client.do("POST", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketReplicationLocation get the locations of the target bucket that can be copied to +// bucketName the bucket name. +// string the locations of the target bucket that can be copied to. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketReplicationLocation(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["replicationLocation"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(data), err +} + +// GetBucketReplicationProgress get the replication progress of bucket +// bucketName the bucket name. +// ruleId the ID of the replication configuration. +// string the replication progress of bucket. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketReplicationProgress(bucketName string, ruleId string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["replicationProgress"] = nil + if ruleId != "" { + params["rule-id"] = ruleId + } + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(data), err +} + +// GetBucketCname get bucket's binding cname +// bucketName the bucket name. +// string the xml configuration of bucket. +// error it's nil if no error, otherwise it's an error object. +func (client Client) GetBucketCname(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["cname"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) 
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+	return string(data), err
+}
+
+// LimitUploadSpeed sets the upload bandwidth limit. The default is 0, which means unlimited.
+// upSpeed the limit in KB/s; 0 (the default) means unlimited
+// error it's nil if success, otherwise failure
+func (client Client) LimitUploadSpeed(upSpeed int) error {
+	if client.Config == nil {
+		return fmt.Errorf("client config is nil")
+	}
+	return client.Config.LimitUploadSpeed(upSpeed)
+}
+
+// LimitDownloadSpeed sets the download bandwidth limit. The default is 0, which means unlimited.
+// downSpeed the limit in KB/s; 0 (the default) means unlimited
+// error it's nil if success, otherwise failure
+func (client Client) LimitDownloadSpeed(downSpeed int) error {
+	if client.Config == nil {
+		return fmt.Errorf("client config is nil")
+	}
+	return client.Config.LimitDownloadSpeed(downSpeed)
+}
+
+// UseCname sets the flag of using CNAME. By default it's false.
+//
+// isUseCname true: the endpoint is a CNAME; false: the endpoint is not a CNAME. Default is false.
+//
+func UseCname(isUseCname bool) ClientOption {
+	return func(client *Client) {
+		client.Config.IsCname = isUseCname
+		client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+	}
+}
+
+// Timeout sets the HTTP timeouts in seconds.
+//
+// connectTimeoutSec HTTP connect timeout in seconds. Default is 10. 0 means infinite (not recommended).
+// readWriteTimeout HTTP read/write timeout in seconds. Default is 20. 0 means infinite.
+//
+func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
+	return func(client *Client) {
+		client.Config.HTTPTimeout.ConnectTimeout =
+			time.Second * time.Duration(connectTimeoutSec)
+		client.Config.HTTPTimeout.ReadWriteTimeout =
+			time.Second * time.Duration(readWriteTimeout)
+		client.Config.HTTPTimeout.HeaderTimeout =
+			time.Second * time.Duration(readWriteTimeout)
+		client.Config.HTTPTimeout.IdleConnTimeout =
+			time.Second * time.Duration(readWriteTimeout)
+		client.Config.HTTPTimeout.LongTimeout =
+			time.Second * time.Duration(readWriteTimeout*10)
+	}
+}
+
+// SecurityToken sets the temporary user's SecurityToken.
+//
+// token the STS token
+//
+func SecurityToken(token string) ClientOption {
+	return func(client *Client) {
+		client.Config.SecurityToken = strings.TrimSpace(token)
+	}
+}
+
+// EnableMD5 enables MD5 validation.
+//
+// isEnableMD5 true: enable MD5 validation; false: disable MD5 validation.
+//
+func EnableMD5(isEnableMD5 bool) ClientOption {
+	return func(client *Client) {
+		client.Config.IsEnableMD5 = isEnableMD5
+	}
+}
+
+// MD5ThresholdCalcInMemory sets the memory usage threshold for computing the MD5; the default is 16MB.
+//
+// threshold the memory threshold in bytes. When the uploaded content is larger than the threshold, a temp file is used for computing the MD5.
+//
+func MD5ThresholdCalcInMemory(threshold int64) ClientOption {
+	return func(client *Client) {
+		client.Config.MD5Threshold = threshold
+	}
+}
+
+// EnableCRC enables the CRC checksum. Default is true.
+//
+// isEnableCRC true: enable the CRC checksum; false: disable the CRC checksum.
+//
+func EnableCRC(isEnableCRC bool) ClientOption {
+	return func(client *Client) {
+		client.Config.IsEnableCRC = isEnableCRC
+	}
+}
+
+// UserAgent specifies the UserAgent. The default is aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2).
+//
+// userAgent the user agent string.
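+//
+// Illustrative use of the ClientOptions above (editor's sketch, not part of
+// the vendored SDK; the endpoint and credential placeholders are assumptions,
+// and from outside this package the calls would be qualified with "oss.").
+//
+//	client, err := New("https://oss-cn-hangzhou.aliyuncs.com",
+//		"<accessKeyID>", "<accessKeySecret>",
+//		Timeout(10, 120),        // 10s connect, 120s read/write
+//		EnableCRC(true),         // keep CRC64 validation on
+//		UserAgent("my-app/1.0")) // custom User-Agent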
+//
+func UserAgent(userAgent string) ClientOption {
+	return func(client *Client) {
+		client.Config.UserAgent = userAgent
+		client.Config.UserSetUa = true
+	}
+}
+
+// Proxy sets the proxy (optional). By default no proxy is used.
+//
+// proxyHost the proxy host in the format "host:port". For example, proxy.com:80 .
+//
+func Proxy(proxyHost string) ClientOption {
+	return func(client *Client) {
+		client.Config.IsUseProxy = true
+		client.Config.ProxyHost = proxyHost
+		client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+	}
+}
+
+// AuthProxy sets the proxy information with user name and password.
+//
+// proxyHost the proxy host in the format "host:port". For example, proxy.com:80 .
+// proxyUser the proxy user name.
+// proxyPassword the proxy password.
+//
+func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
+	return func(client *Client) {
+		client.Config.IsUseProxy = true
+		client.Config.ProxyHost = proxyHost
+		client.Config.IsAuthProxy = true
+		client.Config.ProxyUser = proxyUser
+		client.Config.ProxyPassword = proxyPassword
+		client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+	}
+}
+
+//
+// HTTPClient sets the http.Client in use to the one passed in
+//
+func HTTPClient(HTTPClient *http.Client) ClientOption {
+	return func(client *Client) {
+		client.HTTPClient = HTTPClient
+	}
+}
+
+//
+// SetLogLevel sets the OSS SDK log level
+//
+func SetLogLevel(LogLevel int) ClientOption {
+	return func(client *Client) {
+		client.Config.LogLevel = LogLevel
+	}
+}
+
+//
+// SetLogger sets the OSS SDK logger
+//
+func SetLogger(Logger *log.Logger) ClientOption {
+	return func(client *Client) {
+		client.Config.Logger = Logger
+	}
+}
+
+// SetCredentialsProvider sets the provider used to fetch the user's access credentials
+func SetCredentialsProvider(provider CredentialsProvider) ClientOption {
+	return func(client *Client) {
+		client.Config.CredentialsProvider = provider
+	}
+}
+
+// SetLocalAddr sets the local address used by the client
+func SetLocalAddr(localAddr net.Addr) ClientOption {
+	return func(client *Client) {
+		client.Config.LocalAddr = localAddr
+	}
+}
+
+// AuthVersion sets the auth version (v1 or v2 signature) expected by the OSS server
+func AuthVersion(authVersion AuthVersionType) ClientOption {
+	return func(client *Client) {
+		client.Config.AuthVersion = authVersion
+	}
+}
+
+// AdditionalHeaders sets special HTTP headers that need to be signed
+func AdditionalHeaders(headers []string) ClientOption {
+	return func(client *Client) {
+		client.Config.AdditionalHeaders = headers
+	}
+}
+
+// RedirectEnabled sets whether HTTP redirects are followed. Only effective from go1.7 onward.
+func RedirectEnabled(enabled bool) ClientOption {
+	return func(client *Client) {
+		client.Config.RedirectEnabled = enabled
+	}
+}
+
+// InsecureSkipVerify sets whether to skip verifying the server's TLS certificate
+func InsecureSkipVerify(enabled bool) ClientOption {
+	return func(client *Client) {
+		client.Config.InsecureSkipVerify = enabled
+	}
+}
+
+// Private
+func (client Client) do(method, bucketName string, params map[string]interface{},
+	headers map[string]string, data io.Reader, options ...Option) (*Response, error) {
+	err := CheckBucketName(bucketName)
+	if len(bucketName) > 0 && err != nil {
+		return nil, err
+	}
+
+	// option headers
+	addHeaders := make(map[string]string)
+	err = handleOptions(addHeaders, options)
+	if err != nil {
+		return nil, err
+	}
+
+	// merge headers
+	if headers == nil {
+		headers = make(map[string]string)
+	}
+
+	for k, v := range addHeaders {
+		if _, ok := headers[k]; !ok {
+			headers[k] = v
+		}
+	}
+
+	resp, err := client.Conn.Do(method, bucketName, "", params, headers, data, 0, nil)
+
+	// get the response header; guard against a nil response when Do fails
+	respHeader, _ := FindOption(options, responseHeader, nil)
+	if respHeader != nil && resp != nil {
+		pRespHeader := respHeader.(*http.Header)
+		*pRespHeader = resp.Headers
+	}
+
+	return resp, err
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
new file mode 100644
index 0000000000..d43527255f
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
@@ -0,0 +1,207 @@
+package oss
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"time"
+)
+
+// Define the level of the output log
+const (
+	LogOff = iota
+	Error
+	Warn
+	Info
+	Debug
+)
+
+// LogTag is the tag for each level of log
+var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"}
+
+// HTTPTimeout defines HTTP timeout.
+type HTTPTimeout struct {
+	ConnectTimeout   time.Duration
+	ReadWriteTimeout time.Duration
+	HeaderTimeout    time.Duration
+	LongTimeout      time.Duration
+	IdleConnTimeout  time.Duration
+}
+
+// HTTPMaxConns defines max idle connections and max idle connections per host
+type HTTPMaxConns struct {
+	MaxIdleConns        int
+	MaxIdleConnsPerHost int
+}
+
+// Credentials is the interface to get the AccessKeyID, AccessKeySecret and SecurityToken
+type Credentials interface {
+	GetAccessKeyID() string
+	GetAccessKeySecret() string
+	GetSecurityToken() string
+}
+
+// CredentialsProvider is the interface to get Credentials
+type CredentialsProvider interface {
+	GetCredentials() Credentials
+}
+
+type defaultCredentials struct {
+	config *Config
+}
+
+func (defCre *defaultCredentials) GetAccessKeyID() string {
+	return defCre.config.AccessKeyID
+}
+
+func (defCre *defaultCredentials) GetAccessKeySecret() string {
+	return defCre.config.AccessKeySecret
+}
+
+func (defCre *defaultCredentials) GetSecurityToken() string {
+	return defCre.config.SecurityToken
+}
+
+type defaultCredentialsProvider struct {
+	config *Config
+}
+
+func (defBuild *defaultCredentialsProvider) GetCredentials() Credentials {
+	return &defaultCredentials{config: defBuild.config}
+}
+
+// Config defines the OSS configuration
+type Config struct {
+	Endpoint            string              // OSS endpoint
+	AccessKeyID         string              // AccessKeyID
+	AccessKeySecret     string              // AccessKeySecret
+	RetryTimes          uint                // Retry count. By default it's 5.
+	UserAgent           string              // SDK name/version/system information
+	IsDebug             bool                // Enable debug mode. Default is false.
+	Timeout             uint                // Timeout in seconds. By default it's 60.
+	SecurityToken       string              // STS token
+	IsCname             bool                // Whether the endpoint is a CNAME
+	HTTPTimeout         HTTPTimeout         // HTTP timeout
+	HTTPMaxConns        HTTPMaxConns        // HTTP max connections
+	IsUseProxy          bool                // Flag of using proxy
+	ProxyHost           string              // Proxy host address
+	IsAuthProxy         bool                // Whether the proxy needs authentication
+	ProxyUser           string              // Proxy user
+	ProxyPassword       string              // Proxy password
+	IsEnableMD5         bool                // Flag of enabling MD5 for upload
+	MD5Threshold        int64               // Memory footprint threshold for each MD5 computation (16MB is the default), in bytes. When the data is larger than that, a temp file is used.
+	IsEnableCRC         bool                // Flag of enabling CRC for upload
+	LogLevel            int                 // Log level
+	Logger              *log.Logger         // For writing logs
+	UploadLimitSpeed    int                 // Upload limit speed in KB/s, 0 is unlimited
+	UploadLimiter       *OssLimiter         // Bandwidth limit reader for upload
+	DownloadLimitSpeed  int                 // Download limit speed in KB/s, 0 is unlimited
+	DownloadLimiter     *OssLimiter         // Bandwidth limit reader for download
+	CredentialsProvider CredentialsProvider // User-provided interface to get AccessKeyID, AccessKeySecret, SecurityToken
+	LocalAddr           net.Addr            // Local client host info
+	UserSetUa           bool                // Whether the UserAgent was set by the user
+	AuthVersion         AuthVersionType     // v1 or v2 signature; default is v1
+	AdditionalHeaders   []string            // Special HTTP headers that need to be signed
+	RedirectEnabled     bool                // Only effective from go1.7 onward; whether HTTP redirects are enabled
+	InsecureSkipVerify  bool                // For HTTPS, whether to skip verifying the server certificate
+}
+
+// LimitUploadSpeed uploadSpeed in KB/s; 0 (the default) means unlimited
+func (config *Config) LimitUploadSpeed(uploadSpeed int) error {
+	if uploadSpeed < 0 {
+		return fmt.Errorf("invalid argument, the value of uploadSpeed is less than 0")
+	} else if uploadSpeed == 0 {
+		config.UploadLimitSpeed = 0
+		config.UploadLimiter = nil
+		return nil
+	}
+
+	var err error
+	config.UploadLimiter, err = GetOssLimiter(uploadSpeed)
+	if err == nil {
+		config.UploadLimitSpeed = uploadSpeed
+	}
+	return err
+}
+
+// LimitDownloadSpeed downloadSpeed in KB/s; 0 (the default) means unlimited
+func (config *Config) LimitDownloadSpeed(downloadSpeed int) error {
+	if downloadSpeed < 0 {
+		return fmt.Errorf("invalid argument, the value of downloadSpeed is less than 0")
+	} else if downloadSpeed == 0 {
+		config.DownloadLimitSpeed = 0
+		config.DownloadLimiter = nil
+		return nil
+	}
+
+	var err error
+	config.DownloadLimiter, err = GetOssLimiter(downloadSpeed)
+	if err == nil {
+		config.DownloadLimitSpeed = downloadSpeed
+	}
+	return err
+}
+
+// WriteLog is the output log function
+func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
+	if config.LogLevel < LogLevel || config.Logger == nil {
+		return
+	}
+
+	var logBuffer bytes.Buffer
+	logBuffer.WriteString(LogTag[LogLevel-1])
+	logBuffer.WriteString(fmt.Sprintf(format, a...))
+	config.Logger.Printf("%s", logBuffer.String())
+}
+
+// GetCredentials gets the Credentials from the configured CredentialsProvider
+func (config *Config) GetCredentials() Credentials {
+	return config.CredentialsProvider.GetCredentials()
+}
+
+// getDefaultOssConfig gets the default configuration.
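+//
+// Illustrative tweak of the defaults (editor's sketch, not part of the
+// vendored SDK): limiting bandwidth on a Config before handing it to a
+// client.
+//
+//	cfg := getDefaultOssConfig()
+//	_ = cfg.LimitUploadSpeed(1024) // cap uploads at roughly 1 MB/s
+//	_ = cfg.LimitDownloadSpeed(0)  // 0 removes any download cap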
+func getDefaultOssConfig() *Config { + config := Config{} + + config.Endpoint = "" + config.AccessKeyID = "" + config.AccessKeySecret = "" + config.RetryTimes = 5 + config.IsDebug = false + config.UserAgent = userAgent() + config.Timeout = 60 // Seconds + config.SecurityToken = "" + config.IsCname = false + + config.HTTPTimeout.ConnectTimeout = time.Second * 30 // 30s + config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s + config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s + config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s + config.HTTPTimeout.IdleConnTimeout = time.Second * 50 // 50s + config.HTTPMaxConns.MaxIdleConns = 100 + config.HTTPMaxConns.MaxIdleConnsPerHost = 100 + + config.IsUseProxy = false + config.ProxyHost = "" + config.IsAuthProxy = false + config.ProxyUser = "" + config.ProxyPassword = "" + + config.MD5Threshold = 16 * 1024 * 1024 // 16MB + config.IsEnableMD5 = false + config.IsEnableCRC = true + + config.LogLevel = LogOff + config.Logger = log.New(os.Stdout, "", log.LstdFlags) + + provider := &defaultCredentialsProvider{config: &config} + config.CredentialsProvider = provider + + config.AuthVersion = AuthV1 + config.RedirectEnabled = true + config.InsecureSkipVerify = false + + return &config +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go new file mode 100644 index 0000000000..cf2efee47a --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go @@ -0,0 +1,852 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "hash" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "sort" + "strconv" + "strings" + "time" +) + +// Conn defines OSS Conn +type Conn struct { + config *Config + url *urlMaker + client *http.Client +} + +var signKeyList = []string{"acl", "uploads", "location", "cors", + "logging", "website", "referer", "lifecycle", + "delete", "append", "tagging", "objectMeta", + "uploadId", "partNumber", "security-token", + "position", "img", "style", "styleName", + "replication", "replicationProgress", + "replicationLocation", "cname", "bucketInfo", + "comp", "qos", "live", "status", "vod", + "startTime", "endTime", "symlink", + "x-oss-process", "response-content-type", "x-oss-traffic-limit", + "response-content-language", "response-expires", + "response-cache-control", "response-content-disposition", + "response-content-encoding", "udf", "udfName", "udfImage", + "udfId", "udfImageDesc", "udfApplication", "comp", + "udfApplicationLog", "restore", "callback", "callback-var", "qosInfo", + "policy", "stat", "encryption", "versions", "versioning", "versionId", "requestPayment", + "x-oss-request-payer", "sequential", + "inventory", "inventoryId", "continuation-token", "asyncFetch", + "worm", "wormId", "wormExtend", "withHashContext", + "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256", + "x-oss-hash-ctx", "x-oss-md5-ctx", "transferAcceleration", + "regionList", +} + +// init initializes Conn +func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error { + if client == nil { + // New transport + transport := newTransport(conn, config) + + // Proxy + if conn.config.IsUseProxy { + proxyURL, err := url.Parse(config.ProxyHost) + if err != nil { + return err + } + if config.IsAuthProxy { + if config.ProxyPassword != "" { + proxyURL.User = url.UserPassword(config.ProxyUser, config.ProxyPassword) + } else { + proxyURL.User = 
url.User(config.ProxyUser) + } + } + transport.Proxy = http.ProxyURL(proxyURL) + } + client = &http.Client{Transport: transport} + if !config.RedirectEnabled { + disableHTTPRedirect(client) + } + } + + conn.config = config + conn.url = urlMaker + conn.client = client + + return nil +} + +// Do sends request and returns the response +func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + urlParams := conn.getURLParams(params) + subResource := conn.getSubResource(params) + uri := conn.url.getURL(bucketName, objectName, urlParams) + resource := conn.getResource(bucketName, objectName, subResource) + return conn.doRequest(method, uri, resource, headers, data, initCRC, listener) +} + +// DoURL sends the request with signed URL and returns the response result. +func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + // Get URI from signedURL + uri, err := url.ParseRequestURI(signedURL) + if err != nil { + return nil, err + } + + m := strings.ToUpper(string(method)) + req := &http.Request{ + Method: m, + URL: uri, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: uri.Host, + } + + tracker := &readerTracker{completedBytes: 0} + fd, crc := conn.handleBody(req, data, initCRC, listener, tracker) + if fd != nil { + defer func() { + fd.Close() + os.Remove(fd.Name()) + }() + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + req.Header.Set(HTTPHeaderHost, req.Host) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + // Transfer started + event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0) + publishProgress(listener, event) + + if conn.config.LogLevel >= Debug { + conn.LoggerHTTPReq(req) + } + + resp, err := conn.client.Do(req) + if err != nil { + // Transfer failed + event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error()) + return nil, err + } + + if conn.config.LogLevel >= Debug { + //print out http resp + conn.LoggerHTTPResp(req, resp) + } + + // Transfer completed + event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + + return conn.handleResponse(resp, crc) +} + +func (conn Conn) getURLParams(params map[string]interface{}) string { + // Sort + keys := make([]string, 0, len(params)) + for k := range params { + keys = append(keys, k) + } + sort.Strings(keys) + + // Serialize + var buf bytes.Buffer + for _, k := range keys { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(url.QueryEscape(k)) + if params[k] != nil && params[k].(string) != "" { + buf.WriteString("=" + strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1)) + } + } + + return buf.String() +} + +func (conn Conn) getSubResource(params map[string]interface{}) string { + // Sort + keys := make([]string, 0, len(params)) + signParams := make(map[string]string) + for k := range params { + if 
conn.config.AuthVersion == AuthV2 { + encodedKey := url.QueryEscape(k) + keys = append(keys, encodedKey) + if params[k] != nil && params[k] != "" { + signParams[encodedKey] = strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1) + } + } else if conn.isParamSign(k) { + keys = append(keys, k) + if params[k] != nil { + signParams[k] = params[k].(string) + } + } + } + sort.Strings(keys) + + // Serialize + var buf bytes.Buffer + for _, k := range keys { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(k) + if _, ok := signParams[k]; ok { + if signParams[k] != "" { + buf.WriteString("=" + signParams[k]) + } + } + } + return buf.String() +} + +func (conn Conn) isParamSign(paramKey string) bool { + for _, k := range signKeyList { + if paramKey == k { + return true + } + } + return false +} + +// getResource gets canonicalized resource +func (conn Conn) getResource(bucketName, objectName, subResource string) string { + if subResource != "" { + subResource = "?" + subResource + } + if bucketName == "" { + if conn.config.AuthVersion == AuthV2 { + return url.QueryEscape("/") + subResource + } + return fmt.Sprintf("/%s%s", bucketName, subResource) + } + if conn.config.AuthVersion == AuthV2 { + return url.QueryEscape("/"+bucketName+"/") + strings.Replace(url.QueryEscape(objectName), "+", "%20", -1) + subResource + } + return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource) +} + +func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + method = strings.ToUpper(method) + req := &http.Request{ + Method: method, + URL: uri, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: uri.Host, + } + + tracker := &readerTracker{completedBytes: 0} + fd, crc := conn.handleBody(req, data, initCRC, listener, tracker) + if fd != nil { + defer func() { + fd.Close() + os.Remove(fd.Name()) + }() + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + date := time.Now().UTC().Format(http.TimeFormat) + req.Header.Set(HTTPHeaderDate, date) + req.Header.Set(HTTPHeaderHost, req.Host) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + akIf := conn.config.GetCredentials() + if akIf.GetSecurityToken() != "" { + req.Header.Set(HTTPHeaderOssSecurityToken, akIf.GetSecurityToken()) + } + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + conn.signHeader(req, canonicalizedResource) + + // Transfer started + event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0) + publishProgress(listener, event) + + if conn.config.LogLevel >= Debug { + conn.LoggerHTTPReq(req) + } + + resp, err := conn.client.Do(req) + + if err != nil { + // Transfer failed + event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error()) + return nil, err + } + + if conn.config.LogLevel >= Debug { + //print out http resp + conn.LoggerHTTPResp(req, resp) + } + + // Transfer completed + event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + + return conn.handleResponse(resp, crc) +} + +func (conn Conn) 
signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string { + akIf := conn.config.GetCredentials() + if akIf.GetSecurityToken() != "" { + params[HTTPParamSecurityToken] = akIf.GetSecurityToken() + } + + m := strings.ToUpper(string(method)) + req := &http.Request{ + Method: m, + Header: make(http.Header), + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10)) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + if conn.config.AuthVersion == AuthV2 { + params[HTTPParamSignatureVersion] = "OSS2" + params[HTTPParamExpiresV2] = strconv.FormatInt(expiration, 10) + params[HTTPParamAccessKeyIDV2] = conn.config.AccessKeyID + additionalList, _ := conn.getAdditionalHeaderKeys(req) + if len(additionalList) > 0 { + params[HTTPParamAdditionalHeadersV2] = strings.Join(additionalList, ";") + } + } + + subResource := conn.getSubResource(params) + canonicalizedResource := conn.getResource(bucketName, objectName, subResource) + signedStr := conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()) + + if conn.config.AuthVersion == AuthV1 { + params[HTTPParamExpires] = strconv.FormatInt(expiration, 10) + params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID() + params[HTTPParamSignature] = signedStr + } else if conn.config.AuthVersion == AuthV2 { + params[HTTPParamSignatureV2] = signedStr + } + urlParams := conn.getURLParams(params) + return conn.url.getSignURL(bucketName, objectName, urlParams) +} + +func (conn Conn) signRtmpURL(bucketName, channelName, playlistName string, expiration int64) string { + params := map[string]interface{}{} + if playlistName != "" { + params[HTTPParamPlaylistName] = playlistName + } + expireStr := strconv.FormatInt(expiration, 10) + params[HTTPParamExpires] = expireStr + + akIf := conn.config.GetCredentials() + if akIf.GetAccessKeyID() != "" { + params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID() + if akIf.GetSecurityToken() != "" { + params[HTTPParamSecurityToken] = akIf.GetSecurityToken() + } + signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, akIf.GetAccessKeySecret(), params) + params[HTTPParamSignature] = signedStr + } + + urlParams := conn.getURLParams(params) + return conn.url.getSignRtmpURL(bucketName, channelName, urlParams) +} + +// handleBody handles request body +func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64, + listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) { + var file *os.File + var crc hash.Hash64 + reader := body + readerLen, err := GetReaderLen(reader) + if err == nil { + req.ContentLength = readerLen + } + req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10)) + + // MD5 + if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" { + md5 := "" + reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold) + req.Header.Set(HTTPHeaderContentMD5, md5) + } + + // CRC + if reader != nil && conn.config.IsEnableCRC { + crc = NewCRC(CrcTable(), initCRC) + reader = TeeReader(reader, crc, req.ContentLength, listener, tracker) + } + + // HTTP body + rc, ok := 
reader.(io.ReadCloser)
+	if !ok && reader != nil {
+		rc = ioutil.NopCloser(reader)
+	}
+
+	if conn.isUploadLimitReq(req) {
+		limitReader := &LimitSpeedReader{
+			reader:     rc,
+			ossLimiter: conn.config.UploadLimiter,
+		}
+		req.Body = limitReader
+	} else {
+		req.Body = rc
+	}
+	return file, crc
+}
+
+// isUploadLimitReq reports whether the request's upload speed should be limited
+func (conn Conn) isUploadLimitReq(req *http.Request) bool {
+	if conn.config.UploadLimitSpeed == 0 || conn.config.UploadLimiter == nil {
+		return false
+	}
+
+	if req.Method != "GET" && req.Method != "DELETE" && req.Method != "HEAD" {
+		if req.ContentLength > 0 {
+			return true
+		}
+	}
+	return false
+}
+
+func tryGetFileSize(f *os.File) int64 {
+	fInfo, _ := f.Stat()
+	return fInfo.Size()
+}
+
+// handleResponse handles the response
+func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) {
+	var cliCRC uint64
+	var srvCRC uint64
+
+	statusCode := resp.StatusCode
+	if statusCode/100 != 2 {
+		if statusCode >= 400 && statusCode <= 505 {
+			// 4xx and 5xx indicate that an error occurred during the operation
+			var respBody []byte
+			respBody, err := readResponseBody(resp)
+			if err != nil {
+				return nil, err
+			}
+
+			if len(respBody) == 0 {
+				err = ServiceError{
+					StatusCode: statusCode,
+					RequestID:  resp.Header.Get(HTTPHeaderOssRequestID),
+				}
+			} else {
+				// Response contains a storage service error object, unmarshal it
+				srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
+					resp.Header.Get(HTTPHeaderOssRequestID))
+				if errIn != nil { // error unmarshaling the error response
+					err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
+				} else {
+					err = srvErr
+				}
+			}
+
+			return &Response{
+				StatusCode: resp.StatusCode,
+				Headers:    resp.Header,
+				Body:       ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
+			}, err
+		} else if statusCode >= 300 && statusCode <= 307 {
+			// OSS uses 3xx, but the response has no body
+			err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
+			return &Response{
+				StatusCode: resp.StatusCode,
+				Headers:    resp.Header,
+				Body:       resp.Body,
+			}, err
+		} else {
+			// (0,300) [308,400) [506,)
+			// Other extended http StatusCode
+			var respBody []byte
+			respBody, err := readResponseBody(resp)
+			if err != nil {
+				return &Response{StatusCode: resp.StatusCode, Headers: resp.Header, Body: ioutil.NopCloser(bytes.NewReader(respBody))}, err
+			}
+
+			if len(respBody) == 0 {
+				err = ServiceError{
+					StatusCode: statusCode,
+					RequestID:  resp.Header.Get(HTTPHeaderOssRequestID),
+				}
+			} else {
+				// Response contains a storage service error object, unmarshal it
+				srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
+					resp.Header.Get(HTTPHeaderOssRequestID))
+				if errIn != nil { // error unmarshaling the error response
+					err = fmt.Errorf("unknown response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
+				} else {
+					err = srvErr
+				}
+			}
+
+			return &Response{
+				StatusCode: resp.StatusCode,
+				Headers:    resp.Header,
+				Body:       ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
+			}, err
+		}
+	} else {
+		if conn.config.IsEnableCRC && crc != nil {
+			cliCRC = crc.Sum64()
+		}
+		srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64)
+
+		realBody := resp.Body
+		if conn.isDownloadLimitResponse(resp) {
+			limitReader := &LimitSpeedReader{
+				reader:     realBody,
+				ossLimiter: conn.config.DownloadLimiter,
+			}
+			realBody = limitReader
+		}
+
+		// 2xx, successful
+		return &Response{
+			StatusCode: resp.StatusCode,
+			Headers:    resp.Header,
+			Body:       realBody,
+			ClientCRC:  cliCRC,
+			ServerCRC:  srvCRC,
+		}, nil
+	}
+}
+
+// isDownloadLimitResponse reports whether the response's download speed should be limited
+func (conn Conn) isDownloadLimitResponse(resp *http.Response) bool {
+	if resp == nil || conn.config.DownloadLimitSpeed == 0 || conn.config.DownloadLimiter == nil {
+		return false
+	}
+
+	if strings.EqualFold(resp.Request.Method, "GET") {
+		return true
+	}
+	return false
+}
+
+// LoggerHTTPReq prints the header information of the HTTP request
+func (conn Conn) LoggerHTTPReq(req *http.Request) {
+	var logBuffer bytes.Buffer
+	logBuffer.WriteString(fmt.Sprintf("[Req:%p]Method:%s\t", req, req.Method))
+	logBuffer.WriteString(fmt.Sprintf("Host:%s\t", req.URL.Host))
+	logBuffer.WriteString(fmt.Sprintf("Path:%s\t", req.URL.Path))
+	logBuffer.WriteString(fmt.Sprintf("Query:%s\t", req.URL.RawQuery))
+	logBuffer.WriteString("Header info:")
+
+	for k, v := range req.Header {
+		var valueBuffer bytes.Buffer
+		for j := 0; j < len(v); j++ {
+			if j > 0 {
+				valueBuffer.WriteString(" ")
+			}
+			valueBuffer.WriteString(v[j])
+		}
+		logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String()))
+	}
+	conn.config.WriteLog(Debug, "%s\n", logBuffer.String())
+}
+
+// LoggerHTTPResp prints the response to the HTTP request
+func (conn Conn) LoggerHTTPResp(req *http.Request, resp *http.Response) {
+	var logBuffer bytes.Buffer
+	logBuffer.WriteString(fmt.Sprintf("[Resp:%p]StatusCode:%d\t", req, resp.StatusCode))
+	logBuffer.WriteString("Header info:")
+	for k, v := range resp.Header {
+		var valueBuffer bytes.Buffer
+		for j := 0; j < len(v); j++ {
+			if j > 0 {
+				valueBuffer.WriteString(" ")
+			}
+			valueBuffer.WriteString(v[j])
+		}
+		logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String()))
+	}
+	conn.config.WriteLog(Debug, "%s\n", logBuffer.String())
+}
+
+func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) {
+	if contentLen == 0 || contentLen > md5Threshold {
+		// Huge body, use a temporary file
+		tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix)
+		if tempFile != nil {
+			io.Copy(tempFile, body)
+			tempFile.Seek(0, os.SEEK_SET)
+			md5 := md5.New()
+			io.Copy(md5, tempFile)
+			sum := md5.Sum(nil)
+			b64 = base64.StdEncoding.EncodeToString(sum[:])
+			tempFile.Seek(0, os.SEEK_SET)
+			reader = tempFile
+		}
+	} else {
+		// Small body, use memory
+		buf, _ := ioutil.ReadAll(body)
+		sum := md5.Sum(buf)
+		b64 = base64.StdEncoding.EncodeToString(sum[:])
+		reader = bytes.NewReader(buf)
+	}
+	return
+}
+
+func readResponseBody(resp *http.Response) ([]byte, error) {
+	defer resp.Body.Close()
+	out, err := ioutil.ReadAll(resp.Body)
+	if err == io.EOF {
+		err = nil
+	}
+	return out, err
+}
+
+func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
+	var storageErr ServiceError
+
+	if err := xml.Unmarshal(body, &storageErr); err != nil {
+		return storageErr, err
+	}
+
+	storageErr.StatusCode = statusCode
+	storageErr.RequestID = requestID
+	storageErr.RawMessage = string(body)
+	return storageErr, nil
+}
+
+func xmlUnmarshal(body io.Reader, v interface{}) error {
+	data, err := ioutil.ReadAll(body)
+	if err != nil {
+		return err
+	}
+	return xml.Unmarshal(data, v)
+}
+
+func jsonUnmarshal(body io.Reader, v interface{}) error {
+	data, err := ioutil.ReadAll(body)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(data, v)
+}
+
+// timeoutConn handles HTTP timeout
+type
timeoutConn struct { + conn net.Conn + timeout time.Duration + longTimeout time.Duration +} + +func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn { + conn.SetReadDeadline(time.Now().Add(longTimeout)) + return &timeoutConn{ + conn: conn, + timeout: timeout, + longTimeout: longTimeout, + } +} + +func (c *timeoutConn) Read(b []byte) (n int, err error) { + c.SetReadDeadline(time.Now().Add(c.timeout)) + n, err = c.conn.Read(b) + c.SetReadDeadline(time.Now().Add(c.longTimeout)) + return n, err +} + +func (c *timeoutConn) Write(b []byte) (n int, err error) { + c.SetWriteDeadline(time.Now().Add(c.timeout)) + n, err = c.conn.Write(b) + c.SetReadDeadline(time.Now().Add(c.longTimeout)) + return n, err +} + +func (c *timeoutConn) Close() error { + return c.conn.Close() +} + +func (c *timeoutConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *timeoutConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *timeoutConn) SetDeadline(t time.Time) error { + return c.conn.SetDeadline(t) +} + +func (c *timeoutConn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +func (c *timeoutConn) SetWriteDeadline(t time.Time) error { + return c.conn.SetWriteDeadline(t) +} + +// UrlMaker builds URL and resource +const ( + urlTypeCname = 1 + urlTypeIP = 2 + urlTypeAliyun = 3 +) + +type urlMaker struct { + Scheme string // HTTP or HTTPS + NetLoc string // Host or IP + Type int // 1 CNAME, 2 IP, 3 ALIYUN + IsProxy bool // Proxy +} + +// Init parses endpoint +func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) error { + if strings.HasPrefix(endpoint, "http://") { + um.Scheme = "http" + um.NetLoc = endpoint[len("http://"):] + } else if strings.HasPrefix(endpoint, "https://") { + um.Scheme = "https" + um.NetLoc = endpoint[len("https://"):] + } else { + um.Scheme = "http" + um.NetLoc = endpoint + } + + //use url.Parse() to get real host + strUrl := um.Scheme + "://" + um.NetLoc + url, err := url.Parse(strUrl) + if err != nil { + return err + } + + um.NetLoc = url.Host + host, _, err := net.SplitHostPort(um.NetLoc) + if err != nil { + host = um.NetLoc + if host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + } + + ip := net.ParseIP(host) + if ip != nil { + um.Type = urlTypeIP + } else if isCname { + um.Type = urlTypeCname + } else { + um.Type = urlTypeAliyun + } + um.IsProxy = isProxy + + return nil +} + +// getURL gets URL +func (um urlMaker) getURL(bucket, object, params string) *url.URL { + host, path := um.buildURL(bucket, object) + addr := "" + if params == "" { + addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path) + } else { + addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params) + } + uri, _ := url.ParseRequestURI(addr) + return uri +} + +// getSignURL gets sign URL +func (um urlMaker) getSignURL(bucket, object, params string) string { + host, path := um.buildURL(bucket, object) + return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params) +} + +// getSignRtmpURL Build Sign Rtmp URL +func (um urlMaker) getSignRtmpURL(bucket, channelName, params string) string { + host, path := um.buildURL(bucket, "live") + + channelName = url.QueryEscape(channelName) + channelName = strings.Replace(channelName, "+", "%20", -1) + + return fmt.Sprintf("rtmp://%s%s/%s?%s", host, path, channelName, params) +} + +// buildURL builds URL +func (um urlMaker) buildURL(bucket, object string) (string, string) { + var host = "" + var path = "" + + object = 
url.QueryEscape(object) + object = strings.Replace(object, "+", "%20", -1) + + if um.Type == urlTypeCname { + host = um.NetLoc + path = "/" + object + } else if um.Type == urlTypeIP { + if bucket == "" { + host = um.NetLoc + path = "/" + } else { + host = um.NetLoc + path = fmt.Sprintf("/%s/%s", bucket, object) + } + } else { + if bucket == "" { + host = um.NetLoc + path = "/" + } else { + host = bucket + "." + um.NetLoc + path = "/" + object + } + } + + return host, path +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go new file mode 100644 index 0000000000..47e1b0f10f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go @@ -0,0 +1,258 @@ +package oss + +import "os" + +// ACLType bucket/object ACL +type ACLType string + +const ( + // ACLPrivate definition : private read and write + ACLPrivate ACLType = "private" + + // ACLPublicRead definition : public read and private write + ACLPublicRead ACLType = "public-read" + + // ACLPublicReadWrite definition : public read and public write + ACLPublicReadWrite ACLType = "public-read-write" + + // ACLDefault Object. It's only applicable for object. + ACLDefault ACLType = "default" +) + +// bucket versioning status +type VersioningStatus string + +const ( + // Versioning Status definition: Enabled + VersionEnabled VersioningStatus = "Enabled" + + // Versioning Status definition: Suspended + VersionSuspended VersioningStatus = "Suspended" +) + +// MetadataDirectiveType specifying whether use the metadata of source object when copying object. +type MetadataDirectiveType string + +const ( + // MetaCopy the target object's metadata is copied from the source one + MetaCopy MetadataDirectiveType = "COPY" + + // MetaReplace the target object's metadata is created as part of the copy request (not same as the source one) + MetaReplace MetadataDirectiveType = "REPLACE" +) + +// TaggingDirectiveType specifying whether use the tagging of source object when copying object. 
+type TaggingDirectiveType string + +const ( + // TaggingCopy the target object's tagging is copied from the source one + TaggingCopy TaggingDirectiveType = "COPY" + + // TaggingReplace the target object's tagging is created as part of the copy request (not same as the source one) + TaggingReplace TaggingDirectiveType = "REPLACE" +) + +// AlgorithmType specifying the server side encryption algorithm name +type AlgorithmType string + +const ( + KMSAlgorithm AlgorithmType = "KMS" + AESAlgorithm AlgorithmType = "AES256" + SM4Algorithm AlgorithmType = "SM4" +) + +// StorageClassType bucket storage type +type StorageClassType string + +const ( + // StorageStandard standard + StorageStandard StorageClassType = "Standard" + + // StorageIA infrequent access + StorageIA StorageClassType = "IA" + + // StorageArchive archive + StorageArchive StorageClassType = "Archive" + + // StorageColdArchive cold archive + StorageColdArchive StorageClassType = "ColdArchive" +) + +//RedundancyType bucket data Redundancy type +type DataRedundancyType string + +const ( + // RedundancyLRS Local redundancy, default value + RedundancyLRS DataRedundancyType = "LRS" + + // RedundancyZRS Same city redundancy + RedundancyZRS DataRedundancyType = "ZRS" +) + +//ObjecthashFuncType +type ObjecthashFuncType string + +const ( + HashFuncSha1 ObjecthashFuncType = "SHA-1" + HashFuncSha256 ObjecthashFuncType = "SHA-256" +) + +// PayerType the type of request payer +type PayerType string + +const ( + // Requester the requester who send the request + Requester PayerType = "Requester" + + // BucketOwner the requester who send the request + BucketOwner PayerType = "BucketOwner" +) + +//RestoreMode the restore mode for coldArchive object +type RestoreMode string + +const ( + //RestoreExpedited object will be restored in 1 hour + RestoreExpedited RestoreMode = "Expedited" + + //RestoreStandard object will be restored in 2-5 hours + RestoreStandard RestoreMode = "Standard" + + //RestoreBulk object will be restored in 5-10 hours + RestoreBulk RestoreMode = "Bulk" +) + +// HTTPMethod HTTP request method +type HTTPMethod string + +const ( + // HTTPGet HTTP GET + HTTPGet HTTPMethod = "GET" + + // HTTPPut HTTP PUT + HTTPPut HTTPMethod = "PUT" + + // HTTPHead HTTP HEAD + HTTPHead HTTPMethod = "HEAD" + + // HTTPPost HTTP POST + HTTPPost HTTPMethod = "POST" + + // HTTPDelete HTTP DELETE + HTTPDelete HTTPMethod = "DELETE" +) + +// HTTP headers +const ( + HTTPHeaderAcceptEncoding string = "Accept-Encoding" + HTTPHeaderAuthorization = "Authorization" + HTTPHeaderCacheControl = "Cache-Control" + HTTPHeaderContentDisposition = "Content-Disposition" + HTTPHeaderContentEncoding = "Content-Encoding" + HTTPHeaderContentLength = "Content-Length" + HTTPHeaderContentMD5 = "Content-MD5" + HTTPHeaderContentType = "Content-Type" + HTTPHeaderContentLanguage = "Content-Language" + HTTPHeaderDate = "Date" + HTTPHeaderEtag = "ETag" + HTTPHeaderExpires = "Expires" + HTTPHeaderHost = "Host" + HTTPHeaderLastModified = "Last-Modified" + HTTPHeaderRange = "Range" + HTTPHeaderLocation = "Location" + HTTPHeaderOrigin = "Origin" + HTTPHeaderServer = "Server" + HTTPHeaderUserAgent = "User-Agent" + HTTPHeaderIfModifiedSince = "If-Modified-Since" + HTTPHeaderIfUnmodifiedSince = "If-Unmodified-Since" + HTTPHeaderIfMatch = "If-Match" + HTTPHeaderIfNoneMatch = "If-None-Match" + HTTPHeaderACReqMethod = "Access-Control-Request-Method" + HTTPHeaderACReqHeaders = "Access-Control-Request-Headers" + + HTTPHeaderOssACL = "X-Oss-Acl" + HTTPHeaderOssMetaPrefix = "X-Oss-Meta-" + 
HTTPHeaderOssObjectACL = "X-Oss-Object-Acl" + HTTPHeaderOssSecurityToken = "X-Oss-Security-Token" + HTTPHeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption" + HTTPHeaderOssServerSideEncryptionKeyID = "X-Oss-Server-Side-Encryption-Key-Id" + HTTPHeaderOssServerSideDataEncryption = "X-Oss-Server-Side-Data-Encryption" + HTTPHeaderSSECAlgorithm = "X-Oss-Server-Side-Encryption-Customer-Algorithm" + HTTPHeaderSSECKey = "X-Oss-Server-Side-Encryption-Customer-Key" + HTTPHeaderSSECKeyMd5 = "X-Oss-Server-Side-Encryption-Customer-Key-MD5" + HTTPHeaderOssCopySource = "X-Oss-Copy-Source" + HTTPHeaderOssCopySourceRange = "X-Oss-Copy-Source-Range" + HTTPHeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match" + HTTPHeaderOssCopySourceIfNoneMatch = "X-Oss-Copy-Source-If-None-Match" + HTTPHeaderOssCopySourceIfModifiedSince = "X-Oss-Copy-Source-If-Modified-Since" + HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since" + HTTPHeaderOssMetadataDirective = "X-Oss-Metadata-Directive" + HTTPHeaderOssNextAppendPosition = "X-Oss-Next-Append-Position" + HTTPHeaderOssRequestID = "X-Oss-Request-Id" + HTTPHeaderOssCRC64 = "X-Oss-Hash-Crc64ecma" + HTTPHeaderOssSymlinkTarget = "X-Oss-Symlink-Target" + HTTPHeaderOssStorageClass = "X-Oss-Storage-Class" + HTTPHeaderOssCallback = "X-Oss-Callback" + HTTPHeaderOssCallbackVar = "X-Oss-Callback-Var" + HTTPHeaderOssRequester = "X-Oss-Request-Payer" + HTTPHeaderOssTagging = "X-Oss-Tagging" + HTTPHeaderOssTaggingDirective = "X-Oss-Tagging-Directive" + HTTPHeaderOssTrafficLimit = "X-Oss-Traffic-Limit" + HTTPHeaderOssForbidOverWrite = "X-Oss-Forbid-Overwrite" + HTTPHeaderOssRangeBehavior = "X-Oss-Range-Behavior" + HTTPHeaderOssTaskID = "X-Oss-Task-Id" + HTTPHeaderOssHashCtx = "X-Oss-Hash-Ctx" + HTTPHeaderOssMd5Ctx = "X-Oss-Md5-Ctx" + HTTPHeaderAllowSameActionOverLap = "X-Oss-Allow-Same-Action-Overlap" +) + +// HTTP Param +const ( + HTTPParamExpires = "Expires" + HTTPParamAccessKeyID = "OSSAccessKeyId" + HTTPParamSignature = "Signature" + HTTPParamSecurityToken = "security-token" + HTTPParamPlaylistName = "playlistName" + + HTTPParamSignatureVersion = "x-oss-signature-version" + HTTPParamExpiresV2 = "x-oss-expires" + HTTPParamAccessKeyIDV2 = "x-oss-access-key-id" + HTTPParamSignatureV2 = "x-oss-signature" + HTTPParamAdditionalHeadersV2 = "x-oss-additional-headers" +) + +// Other constants +const ( + MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB + MinPartSize = 100 * 1024 // Min part size, 100KB + + FilePermMode = os.FileMode(0664) // Default file permission + + TempFilePrefix = "oss-go-temp-" // Temp file prefix + TempFileSuffix = ".temp" // Temp file suffix + + CheckpointFileSuffix = ".cp" // Checkpoint file suffix + + NullVersion = "null" + + Version = "v2.2.2" // Go SDK version +) + +// FrameType +const ( + DataFrameType = 8388609 + ContinuousFrameType = 8388612 + EndFrameType = 8388613 + MetaEndFrameCSVType = 8388614 + MetaEndFrameJSONType = 8388615 +) + +// AuthVersion the version of auth +type AuthVersionType string + +const ( + // AuthV1 v1 + AuthV1 AuthVersionType = "v1" + // AuthV2 v2 + AuthV2 AuthVersionType = "v2" +) diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go new file mode 100644 index 0000000000..c96694f285 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go @@ -0,0 +1,123 @@ +package oss + +import ( + "hash" + "hash/crc64" +) + +// digest represents the partial evaluation of a checksum. 
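+// (Editor's sketch, not part of the SDK.) A digest seeded with init 0 agrees
+// with the standard library over the same bytes:
+//
+//	h := NewCRC(crc64.MakeTable(crc64.ECMA), 0)
+//	h.Write(data) // data is any []byte; h.Sum64() == crc64.Checksum(data, crc64.MakeTable(crc64.ECMA))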
+type digest struct { + crc uint64 + tab *crc64.Table +} + +// NewCRC creates a new hash.Hash64 computing the CRC64 checksum +// using the polynomial represented by the Table. +func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} } + +// Size returns the number of bytes sum will return. +func (d *digest) Size() int { return crc64.Size } + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (d *digest) BlockSize() int { return 1 } + +// Reset resets the hash to its initial state. +func (d *digest) Reset() { d.crc = 0 } + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (d *digest) Write(p []byte) (n int, err error) { + d.crc = crc64.Update(d.crc, d.tab, p) + return len(p), nil +} + +// Sum64 returns CRC64 value. +func (d *digest) Sum64() uint64 { return d.crc } + +// Sum returns hash value. +func (d *digest) Sum(in []byte) []byte { + s := d.Sum64() + return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// gf2Dim dimension of GF(2) vectors (length of CRC) +const gf2Dim int = 64 + +func gf2MatrixTimes(mat []uint64, vec uint64) uint64 { + var sum uint64 + for i := 0; vec != 0; i++ { + if vec&1 != 0 { + sum ^= mat[i] + } + + vec >>= 1 + } + return sum +} + +func gf2MatrixSquare(square []uint64, mat []uint64) { + for n := 0; n < gf2Dim; n++ { + square[n] = gf2MatrixTimes(mat, mat[n]) + } +} + +// CRC64Combine combines CRC64 +func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 { + var even [gf2Dim]uint64 // Even-power-of-two zeros operator + var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator + + // Degenerate case + if len2 == 0 { + return crc1 + } + + // Put operator for one zero bit in odd + odd[0] = crc64.ECMA // CRC64 polynomial + var row uint64 = 1 + for n := 1; n < gf2Dim; n++ { + odd[n] = row + row <<= 1 + } + + // Put operator for two zero bits in even + gf2MatrixSquare(even[:], odd[:]) + + // Put operator for four zero bits in odd + gf2MatrixSquare(odd[:], even[:]) + + // Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even + for { + // Apply zeros operator for this bit of len2 + gf2MatrixSquare(even[:], odd[:]) + + if len2&1 != 0 { + crc1 = gf2MatrixTimes(even[:], crc1) + } + + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + + // Another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd[:], even[:]) + if len2&1 != 0 { + crc1 = gf2MatrixTimes(odd[:], crc1) + } + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + } + + // Return combined CRC + crc1 ^= crc2 + return crc1 +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go new file mode 100644 index 0000000000..90c1b633d9 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go @@ -0,0 +1,567 @@ +package oss + +import ( + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "hash/crc64" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strconv" + "time" +) + +// DownloadFile downloads files with multipart download. +// +// objectKey the object key. +// filePath the local file to download from objectKey in OSS. 
+// partSize the part size in bytes. +// options object's constraints, check out GetObject for the reference. +// +// error it's nil when the call succeeds, otherwise it's an error object. +// +func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error { + if partSize < 1 { + return errors.New("oss: part size smaller than 1") + } + + uRange, err := GetRangeConfig(options) + if err != nil { + return err + } + + cpConf := getCpConfig(options) + routines := getRoutines(options) + + var strVersionId string + versionId, _ := FindOption(options, "versionId", nil) + if versionId != nil { + strVersionId = versionId.(string) + } + + if cpConf != nil && cpConf.IsEnable { + cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, strVersionId, filePath) + if cpFilePath != "" { + return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange) + } + } + + return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange) +} + +func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, versionId, destFile string) string { + if cpConf.FilePath == "" && cpConf.DirPath != "" { + src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject) + absPath, _ := filepath.Abs(destFile) + cpFileName := getCpFileName(src, absPath, versionId) + cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName + } + return cpConf.FilePath +} + +// downloadWorkerArg is download worker's parameters +type downloadWorkerArg struct { + bucket *Bucket + key string + filePath string + options []Option + hook downloadPartHook + enableCRC bool +} + +// downloadPartHook is hook for test +type downloadPartHook func(part downloadPart) error + +var downloadPartHooker downloadPartHook = defaultDownloadPartHook + +func defaultDownloadPartHook(part downloadPart) error { + return nil +} + +// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject. +type defaultDownloadProgressListener struct { +} + +// ProgressChanged no-ops +func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) { +} + +// downloadWorker +func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) { + for part := range jobs { + if err := arg.hook(part); err != nil { + failed <- err + break + } + + // Resolve options + r := Range(part.Start, part.End) + p := Progress(&defaultDownloadProgressListener{}) + + var respHeader http.Header + opts := make([]Option, len(arg.options)+3) + // Append orderly, can not be reversed! + opts = append(opts, arg.options...) + opts = append(opts, r, p, GetResponseHeader(&respHeader)) + + rd, err := arg.bucket.GetObject(arg.key, opts...) 
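+		// (Editor's note) Range(part.Start, part.End) was appended after the
+		// caller's options above, so rd streams only this part's byte range;
+		// respHeader is captured for the request-id in the error log below.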
+ if err != nil { + failed <- err + break + } + defer rd.Close() + + var crcCalc hash.Hash64 + if arg.enableCRC { + crcCalc = crc64.New(CrcTable()) + contentLen := part.End - part.Start + 1 + rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil)) + } + defer rd.Close() + + select { + case <-die: + return + default: + } + + fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode) + if err != nil { + failed <- err + break + } + + _, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET) + if err != nil { + fd.Close() + failed <- err + break + } + + startT := time.Now().UnixNano() / 1000 / 1000 / 1000 + _, err = io.Copy(fd, rd) + endT := time.Now().UnixNano() / 1000 / 1000 / 1000 + if err != nil { + arg.bucket.Client.Config.WriteLog(Debug, "download part error,cost:%d second,part number:%d,request id:%s,error:%s.\n", endT-startT, part.Index, GetRequestId(respHeader), err.Error()) + fd.Close() + failed <- err + break + } + + if arg.enableCRC { + part.CRC64 = crcCalc.Sum64() + } + + fd.Close() + results <- part + } +} + +// downloadScheduler +func downloadScheduler(jobs chan downloadPart, parts []downloadPart) { + for _, part := range parts { + jobs <- part + } + close(jobs) +} + +// downloadPart defines download part +type downloadPart struct { + Index int // Part number, starting from 0 + Start int64 // Start index + End int64 // End index + Offset int64 // Offset + CRC64 uint64 // CRC check value of part +} + +// getDownloadParts gets download parts +func getDownloadParts(objectSize, partSize int64, uRange *UnpackedRange) []downloadPart { + parts := []downloadPart{} + part := downloadPart{} + i := 0 + start, end := AdjustRange(uRange, objectSize) + for offset := start; offset < end; offset += partSize { + part.Index = i + part.Start = offset + part.End = GetPartEnd(offset, end, partSize) + part.Offset = start + part.CRC64 = 0 + parts = append(parts, part) + i++ + } + return parts +} + +// getObjectBytes gets object bytes length +func getObjectBytes(parts []downloadPart) int64 { + var ob int64 + for _, part := range parts { + ob += (part.End - part.Start + 1) + } + return ob +} + +// combineCRCInParts caculates the total CRC of continuous parts +func combineCRCInParts(dps []downloadPart) uint64 { + if dps == nil || len(dps) == 0 { + return 0 + } + + crc := dps[0].CRC64 + for i := 1; i < len(dps); i++ { + crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1)) + } + + return crc +} + +// downloadFile downloads file concurrently without checkpoint. +func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *UnpackedRange) error { + tempFilePath := filePath + TempFileSuffix + listener := GetProgressListener(options) + + // If the file does not exist, create one. If exists, the download will overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode) + if err != nil { + return err + } + fd.Close() + + // Get the object detailed meta for object whole size + // must delete header:range to get whole object size + skipOptions := DeleteOption(options, HTTPHeaderRange) + meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...) 
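+	// (Editor's note) With the Range header stripped via DeleteOption above,
+	// the Content-Length parsed below is the whole object's size rather than
+	// the size of a sub-range.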
+	if err != nil {
+		return err
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
+	if err != nil {
+		return err
+	}
+
+	enableCRC := false
+	expectedCRC := (uint64)(0)
+	if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+		if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
+			enableCRC = true
+			expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
+		}
+	}
+
+	// Get the parts of the file
+	parts := getDownloadParts(objectSize, partSize, uRange)
+	jobs := make(chan downloadPart, len(parts))
+	results := make(chan downloadPart, len(parts))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	var completedBytes int64
+	totalBytes := getObjectBytes(parts)
+	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
+	publishProgress(listener, event)
+
+	// Start the download workers
+	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
+	for w := 1; w <= routines; w++ {
+		go downloadWorker(w, arg, jobs, results, failed, die)
+	}
+
+	// Download parts concurrently
+	go downloadScheduler(jobs, parts)
+
+	// Wait until all parts have finished downloading
+	completed := 0
+	for completed < len(parts) {
+		select {
+		case part := <-results:
+			completed++
+			downBytes := (part.End - part.Start + 1)
+			completedBytes += downBytes
+			parts[part.Index].CRC64 = part.CRC64
+			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, downBytes)
+			publishProgress(listener, event)
+		case err := <-failed:
+			close(die)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
+			publishProgress(listener, event)
+			return err
+		}
+
+		if completed >= len(parts) {
+			break
+		}
+	}
+
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
+	publishProgress(listener, event)
+
+	if enableCRC {
+		actualCRC := combineCRCInParts(parts)
+		err = CheckDownloadCRC(actualCRC, expectedCRC)
+		if err != nil {
+			return err
+		}
+	}
+
+	return os.Rename(tempFilePath, filePath)
+}
+
+// ----- Concurrent download with checkpoint -----
+
+const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
+
+type downloadCheckpoint struct {
+	Magic     string         // Magic
+	MD5       string         // Checkpoint content MD5
+	FilePath  string         // Local file
+	Object    string         // Key
+	ObjStat   objectStat     // Object status
+	Parts     []downloadPart // All download parts
+	PartStat  []bool         // Parts' download status
+	Start     int64          // Start point of the file
+	End       int64          // End point of the file
+	enableCRC bool           // Whether has CRC check
+	CRC       uint64         // CRC check value
+}
+
+type objectStat struct {
+	Size         int64  // Object size
+	LastModified string // Last modified time
+	Etag         string // Etag
+}
+
+// isValid reports whether the checkpoint data is valid. It returns true when the data passes the self-check and the object has not been updated since the checkpoint was written.
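+// (Editor's note) The self-check below recomputes the MD5 over the
+// checkpoint's JSON form with the MD5 field blanked, mirroring dump(), so a
+// truncated or hand-edited checkpoint file is discarded as invalid.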
+func (cp downloadCheckpoint) isValid(meta http.Header, uRange *UnpackedRange) (bool, error) { + // Compare the CP's Magic and the MD5 + cpb := cp + cpb.MD5 = "" + js, _ := json.Marshal(cpb) + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + + if cp.Magic != downloadCpMagic || b64 != cp.MD5 { + return false, nil + } + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64) + if err != nil { + return false, err + } + + // Compare the object size, last modified time and etag + if cp.ObjStat.Size != objectSize || + cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) || + cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) { + return false, nil + } + + // Check the download range + if uRange != nil { + start, end := AdjustRange(uRange, objectSize) + if start != cp.Start || end != cp.End { + return false, nil + } + } + + return true, nil +} + +// load checkpoint from local file +func (cp *downloadCheckpoint) load(filePath string) error { + contents, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + + err = json.Unmarshal(contents, cp) + return err +} + +// dump funciton dumps to file +func (cp *downloadCheckpoint) dump(filePath string) error { + bcp := *cp + + // Calculate MD5 + bcp.MD5 = "" + js, err := json.Marshal(bcp) + if err != nil { + return err + } + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + bcp.MD5 = b64 + + // Serialize + js, err = json.Marshal(bcp) + if err != nil { + return err + } + + // Dump + return ioutil.WriteFile(filePath, js, FilePermMode) +} + +// todoParts gets unfinished parts +func (cp downloadCheckpoint) todoParts() []downloadPart { + dps := []downloadPart{} + for i, ps := range cp.PartStat { + if !ps { + dps = append(dps, cp.Parts[i]) + } + } + return dps +} + +// getCompletedBytes gets completed size +func (cp downloadCheckpoint) getCompletedBytes() int64 { + var completedBytes int64 + for i, part := range cp.Parts { + if cp.PartStat[i] { + completedBytes += (part.End - part.Start + 1) + } + } + return completedBytes +} + +// prepare initiates download tasks +func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *UnpackedRange) error { + // CP + cp.Magic = downloadCpMagic + cp.FilePath = filePath + cp.Object = objectKey + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64) + if err != nil { + return err + } + + cp.ObjStat.Size = objectSize + cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified) + cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag) + + if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" { + if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) { + cp.enableCRC = true + cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64) + } + } + + // Parts + cp.Parts = getDownloadParts(objectSize, partSize, uRange) + cp.PartStat = make([]bool, len(cp.Parts)) + for i := range cp.PartStat { + cp.PartStat[i] = false + } + + return nil +} + +func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error { + err := os.Rename(downFilepath, cp.FilePath) + if err != nil { + return err + } + return os.Remove(cpFilePath) +} + +// downloadFileWithCp downloads files with checkpoint. 
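+// A minimal usage sketch (editor's illustration; it assumes the Checkpoint and
+// Routines options defined elsewhere in this SDK). This path is reached
+// through DownloadFile when checkpointing is enabled:
+//
+//	err := bucket.DownloadFile("my-object", "/tmp/my-object", 1024*1024,
+//		oss.Routines(3), oss.Checkpoint(true, "/tmp/my-object.cp"))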
+func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *UnpackedRange) error { + tempFilePath := filePath + TempFileSuffix + listener := GetProgressListener(options) + + // Load checkpoint data. + dcp := downloadCheckpoint{} + err := dcp.load(cpFilePath) + if err != nil { + os.Remove(cpFilePath) + } + + // Get the object detailed meta for object whole size + // must delete header:range to get whole object size + skipOptions := DeleteOption(options, HTTPHeaderRange) + meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...) + if err != nil { + return err + } + + // Load error or data invalid. Re-initialize the download. + valid, err := dcp.isValid(meta, uRange) + if err != nil || !valid { + if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil { + return err + } + os.Remove(cpFilePath) + } + + // Create the file if not exists. Otherwise the parts download will overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode) + if err != nil { + return err + } + fd.Close() + + // Unfinished parts + parts := dcp.todoParts() + jobs := make(chan downloadPart, len(parts)) + results := make(chan downloadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + completedBytes := dcp.getCompletedBytes() + event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size, 0) + publishProgress(listener, event) + + // Start the download workers routine + arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC} + for w := 1; w <= routines; w++ { + go downloadWorker(w, arg, jobs, results, failed, die) + } + + // Concurrently downloads parts + go downloadScheduler(jobs, parts) + + // Wait for the parts download finished + completed := 0 + for completed < len(parts) { + select { + case part := <-results: + completed++ + dcp.PartStat[part.Index] = true + dcp.Parts[part.Index].CRC64 = part.CRC64 + dcp.dump(cpFilePath) + downBytes := (part.End - part.Start + 1) + completedBytes += downBytes + event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size, downBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size, 0) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size, 0) + publishProgress(listener, event) + + if dcp.enableCRC { + actualCRC := combineCRCInParts(dcp.Parts) + err = CheckDownloadCRC(actualCRC, dcp.CRC) + if err != nil { + return err + } + } + + return dcp.complete(cpFilePath, tempFilePath) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go new file mode 100644 index 0000000000..a877211fad --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go @@ -0,0 +1,94 @@ +package oss + +import ( + "encoding/xml" + "fmt" + "net/http" + "strings" +) + +// ServiceError contains fields of the error response from Oss Service REST API. 
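+// (Editor's sketch) A typical OSS error body that unmarshals into this type,
+// with hypothetical identifiers:
+//
+//	<Error>
+//	  <Code>NoSuchKey</Code>
+//	  <Message>The specified key does not exist.</Message>
+//	  <RequestId>5C3D9175B6FC201293AD4712</RequestId>
+//	  <HostId>bucket.oss-cn-hangzhou.aliyuncs.com</HostId>
+//	</Error>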
+type ServiceError struct {
+	XMLName    xml.Name `xml:"Error"`
+	Code       string   `xml:"Code"`      // The error code returned from OSS to the caller
+	Message    string   `xml:"Message"`   // The detail error message from OSS
+	RequestID  string   `xml:"RequestId"` // The UUID used to uniquely identify the request
+	HostID     string   `xml:"HostId"`    // The OSS server cluster's Id
+	Endpoint   string   `xml:"Endpoint"`
+	RawMessage string   // The raw messages from OSS
+	StatusCode int      // HTTP status code
+}
+
+// Error implements interface error
+func (e ServiceError) Error() string {
+	if e.Endpoint == "" {
+		return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s",
+			e.StatusCode, e.Code, e.Message, e.RequestID)
+	}
+	return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s, Endpoint=%s",
+		e.StatusCode, e.Code, e.Message, e.RequestID, e.Endpoint)
+}
+
+// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
+// nor with an HTTP status code indicating success.
+type UnexpectedStatusCodeError struct {
+	allowed []int // The expected HTTP status codes returned from OSS
+	got     int   // The actual HTTP status code from OSS
+}
+
+// Error implements interface error
+func (e UnexpectedStatusCodeError) Error() string {
+	s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
+
+	got := s(e.got)
+	expected := []string{}
+	for _, v := range e.allowed {
+		expected = append(expected, s(v))
+	}
+	return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
+		got, strings.Join(expected, " or "))
+}
+
+// Got is the actual status code returned by oss.
+func (e UnexpectedStatusCodeError) Got() int {
+	return e.got
+}
+
+// CheckRespCode returns UnexpectedStatusCodeError if the given response code is not
+// one of the allowed status codes; otherwise nil.
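+// For example (editor's note), CheckRespCode(204, []int{200, 204}) returns
+// nil, while CheckRespCode(403, []int{200}) returns an
+// UnexpectedStatusCodeError whose Error() reads "oss: status code from
+// service response is 403 Forbidden; was expecting 200 OK".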
+func CheckRespCode(respCode int, allowed []int) error {
+	for _, v := range allowed {
+		if respCode == v {
+			return nil
+		}
+	}
+	return UnexpectedStatusCodeError{allowed, respCode}
+}
+
+// CRCCheckError is returned when the crc check is inconsistent between client and server
+type CRCCheckError struct {
+	clientCRC uint64 // Calculated CRC64 in client
+	serverCRC uint64 // Calculated CRC64 in server
+	operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc
+	requestID string // The request id of this operation
+}
+
+// Error implements interface error
+func (e CRCCheckError) Error() string {
+	return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
+		e.operation, e.clientCRC, e.serverCRC, e.requestID)
+}
+
+// CheckDownloadCRC checks a completed download's CRC64 against the expected value
+func CheckDownloadCRC(clientCRC, serverCRC uint64) error {
+	if clientCRC == serverCRC {
+		return nil
+	}
+	return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""}
+}
+
+// CheckCRC checks the client/server CRC64 consistency when the response carries X-Oss-Hash-Crc64ecma
+func CheckCRC(resp *Response, operation string) error {
+	if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC {
+		return nil
+	}
+	return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)}
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go
new file mode 100644
index 0000000000..943dc8fd0d
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go
@@ -0,0 +1,28 @@
+// +build !go1.7
+
+// "golang.org/x/time/rate" depends on the context package, which Go ships from go1.7 onward.
+// This file exists only so the package builds on older Go versions; it does not support limiting upload speed.
+package oss
+
+import (
+	"fmt"
+	"io"
+)
+
+const (
+	perTokenBandwidthSize int = 1024
+)
+
+type OssLimiter struct {
+}
+
+type LimitSpeedReader struct {
+	io.ReadCloser
+	reader     io.Reader
+	ossLimiter *OssLimiter
+}
+
+func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
+	err = fmt.Errorf("rate.Limiter is not supported below version go1.7")
+	return nil, err
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go
new file mode 100644
index 0000000000..f6baf29877
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go
@@ -0,0 +1,90 @@
+// +build go1.7
+
+package oss
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"time"
+
+	"golang.org/x/time/rate"
+)
+
+const (
+	perTokenBandwidthSize int = 1024
+)
+
+// OssLimiter wraps rate.Limiter
+type OssLimiter struct {
+	limiter *rate.Limiter
+}
+
+// GetOssLimiter creates an OssLimiter
+// uploadSpeed is in KB/s
+func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
+	limiter := rate.NewLimiter(rate.Limit(uploadSpeed), uploadSpeed)
+
+	// First consume the full initial burst so that the limiter behaves more accurately
+	limiter.AllowN(time.Now(), uploadSpeed)
+
+	return &OssLimiter{
+		limiter: limiter,
+	}, nil
+}
+
+// LimitSpeedReader limits the bandwidth of uploads
+type LimitSpeedReader struct {
+	io.ReadCloser
+	reader     io.Reader
+	ossLimiter *OssLimiter
+}
+
+// Read reads up to len(p) bytes, throttled by the rate limiter
+func (r *LimitSpeedReader) Read(p []byte) (n int, err error) {
+	n = 0
+	err = nil
+	start := 0
+	burst := r.ossLimiter.limiter.Burst()
+	var end int
+	var tmpN int
+	var tc int
+	for start < len(p) {
+		if start+burst*perTokenBandwidthSize < len(p) {
+			end = start + burst*perTokenBandwidthSize
+		} else {
+			end = len(p)
+		}
+
+		tmpN, err = r.reader.Read(p[start:end])
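+		// (Editor's note) One limiter token covers perTokenBandwidthSize
+		// (1KB) of payload, so the reservation below charges ceil(tmpN/1024)
+		// tokens and then sleeps for whatever delay the limiter hands back.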
+ if tmpN > 0 { + n += tmpN + start = n + } + + if err != nil { + return + } + + tc = int(math.Ceil(float64(tmpN) / float64(perTokenBandwidthSize))) + now := time.Now() + re := r.ossLimiter.limiter.ReserveN(now, tc) + if !re.OK() { + err = fmt.Errorf("LimitSpeedReader.Read() failure,ReserveN error,start:%d,end:%d,burst:%d,perTokenBandwidthSize:%d", + start, end, burst, perTokenBandwidthSize) + return + } + timeDelay := re.Delay() + time.Sleep(timeDelay) + } + return +} + +// Close ... +func (r *LimitSpeedReader) Close() error { + rc, ok := r.reader.(io.ReadCloser) + if ok { + return rc.Close() + } + return nil +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go new file mode 100644 index 0000000000..bf5ba070bb --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go @@ -0,0 +1,257 @@ +package oss + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "strconv" + "time" +) + +// +// CreateLiveChannel create a live-channel +// +// channelName the name of the channel +// config configuration of the channel +// +// CreateLiveChannelResult the result of create live-channel +// error nil if success, otherwise error +// +func (bucket Bucket) CreateLiveChannel(channelName string, config LiveChannelConfiguration) (CreateLiveChannelResult, error) { + var out CreateLiveChannelResult + + bs, err := xml.Marshal(config) + if err != nil { + return out, err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + params := map[string]interface{}{} + params["live"] = nil + resp, err := bucket.do("PUT", channelName, params, nil, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// PutLiveChannelStatus Set the status of the live-channel: enabled/disabled +// +// channelName the name of the channel +// status enabled/disabled +// +// error nil if success, otherwise error +// +func (bucket Bucket) PutLiveChannelStatus(channelName, status string) error { + params := map[string]interface{}{} + params["live"] = nil + params["status"] = status + + resp, err := bucket.do("PUT", channelName, params, nil, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// PostVodPlaylist create an playlist based on the specified playlist name, startTime and endTime +// +// channelName the name of the channel +// playlistName the name of the playlist, must end with ".m3u8" +// startTime the start time of the playlist +// endTime the endtime of the playlist +// +// error nil if success, otherwise error +// +func (bucket Bucket) PostVodPlaylist(channelName, playlistName string, startTime, endTime time.Time) error { + params := map[string]interface{}{} + params["vod"] = nil + params["startTime"] = strconv.FormatInt(startTime.Unix(), 10) + params["endTime"] = strconv.FormatInt(endTime.Unix(), 10) + + key := fmt.Sprintf("%s/%s", channelName, playlistName) + resp, err := bucket.do("POST", key, params, nil, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetVodPlaylist get the playlist based on the specified channelName, startTime and endTime +// +// channelName the name of the channel +// startTime the start time of the playlist +// endTime the endtime of the playlist +// +// io.ReadCloser reader instance for reading data from 
response. The caller must call Close() on it after use; it is only valid when error is nil.
+// error nil if success, otherwise error
+//
+func (bucket Bucket) GetVodPlaylist(channelName string, startTime, endTime time.Time) (io.ReadCloser, error) {
+	params := map[string]interface{}{}
+	params["vod"] = nil
+	params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
+	params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)
+
+	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return resp.Body, nil
+}
+
+//
+// GetLiveChannelStat gets the state of the live-channel
+//
+// channelName the name of the channel
+//
+// LiveChannelStat the state of the live-channel
+// error nil if success, otherwise error
+//
+func (bucket Bucket) GetLiveChannelStat(channelName string) (LiveChannelStat, error) {
+	var out LiveChannelStat
+	params := map[string]interface{}{}
+	params["live"] = nil
+	params["comp"] = "stat"
+
+	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// GetLiveChannelInfo gets the configuration info of the live-channel
+//
+// channelName the name of the channel
+//
+// LiveChannelConfiguration the configuration info of the live-channel
+// error nil if success, otherwise error
+//
+func (bucket Bucket) GetLiveChannelInfo(channelName string) (LiveChannelConfiguration, error) {
+	var out LiveChannelConfiguration
+	params := map[string]interface{}{}
+	params["live"] = nil
+
+	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// GetLiveChannelHistory gets the push records of the live-channel
+//
+// channelName the name of the channel
+//
+// LiveChannelHistory push records
+// error nil if success, otherwise error
+//
+func (bucket Bucket) GetLiveChannelHistory(channelName string) (LiveChannelHistory, error) {
+	var out LiveChannelHistory
+	params := map[string]interface{}{}
+	params["live"] = nil
+	params["comp"] = "history"
+
+	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// ListLiveChannel lists the live-channels
+//
+// options Prefix: filter to channel names starting with the value of "Prefix"
+//         MaxKeys: the maximum count returned
+//         Marker: the cursor from which listing starts
+//
+// ListLiveChannelResult live-channel list
+// error nil if success, otherwise error
+//
+func (bucket Bucket) ListLiveChannel(options ...Option) (ListLiveChannelResult, error) {
+	var out ListLiveChannelResult
+
+	params, err := GetRawParams(options)
+	if err != nil {
+		return out, err
+	}
+
+	params["live"] = nil
+
+	resp, err := bucket.do("GET", "", params, nil, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// DeleteLiveChannel deletes the live-channel. The operation fails while a client is streaming to the live-channel. Only the live-channel itself is deleted; objects generated by the live-channel are not deleted.
+//
+// channelName the name of the channel
+//
+// error nil if success, otherwise error
+//
+func (bucket Bucket) DeleteLiveChannel(channelName string) error {
+	params := map[string]interface{}{}
+	params["live"] = nil
+
+	if channelName == "" {
+		return fmt.Errorf("invalid argument: channel name is empty")
+	}
+
+	resp, err := bucket.do("DELETE", channelName, params, nil, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// SignRtmpURL generates a signed RTMP push-stream URL for a trusted user to push an RTMP stream to the live-channel.
+//
+// channelName the name of the channel
+// playlistName the name of the playlist, must end with ".m3u8"
+// expires expiration (in seconds)
+//
+// string signed rtmp push-stream url
+// error nil if success, otherwise error
+//
+func (bucket Bucket) SignRtmpURL(channelName, playlistName string, expires int64) (string, error) {
+	if expires <= 0 {
+		return "", fmt.Errorf("invalid argument: %d, expires must be greater than 0", expires)
+	}
+	expiration := time.Now().Unix() + expires
+
+	return bucket.Client.Conn.signRtmpURL(bucket.BucketName, channelName, playlistName, expiration), nil
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
new file mode 100644
index 0000000000..64f4dcc638
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
@@ -0,0 +1,572 @@
+package oss
+
+import (
+	"mime"
+	"path"
+	"strings"
+)
+
+var extToMimeType = map[string]string{
+	".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+	".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
+	".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
+	".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
+	".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+	".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
+	".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+	".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
+	".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
+	".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
+	".apk":  "application/vnd.android.package-archive",
+	".hqx":  "application/mac-binhex40",
+	".cpt":  "application/mac-compactpro",
+	".doc":  "application/msword",
+	".ogg":  "application/ogg",
+	".pdf":  "application/pdf",
+	".rtf":  "text/rtf",
+	".mif":  "application/vnd.mif",
+	".xls":  "application/vnd.ms-excel",
+	".ppt":  "application/vnd.ms-powerpoint",
+	".odc":  "application/vnd.oasis.opendocument.chart",
+	".odb":  "application/vnd.oasis.opendocument.database",
+	".odf":  "application/vnd.oasis.opendocument.formula",
+	".odg":  "application/vnd.oasis.opendocument.graphics",
+	".otg":  "application/vnd.oasis.opendocument.graphics-template",
+	".odi":  "application/vnd.oasis.opendocument.image",
+	".odp":  "application/vnd.oasis.opendocument.presentation",
+	".otp":  "application/vnd.oasis.opendocument.presentation-template",
+	".ods":  "application/vnd.oasis.opendocument.spreadsheet",
+	".ots":  "application/vnd.oasis.opendocument.spreadsheet-template",
+	".odt":  "application/vnd.oasis.opendocument.text",
+	".odm":  "application/vnd.oasis.opendocument.text-master",
+	".ott":
"application/vnd.oasis.opendocument.text-template", + ".oth": "application/vnd.oasis.opendocument.text-web", + ".sxw": "application/vnd.sun.xml.writer", + ".stw": "application/vnd.sun.xml.writer.template", + ".sxc": "application/vnd.sun.xml.calc", + ".stc": "application/vnd.sun.xml.calc.template", + ".sxd": "application/vnd.sun.xml.draw", + ".std": "application/vnd.sun.xml.draw.template", + ".sxi": "application/vnd.sun.xml.impress", + ".sti": "application/vnd.sun.xml.impress.template", + ".sxg": "application/vnd.sun.xml.writer.global", + ".sxm": "application/vnd.sun.xml.math", + ".sis": "application/vnd.symbian.install", + ".wbxml": "application/vnd.wap.wbxml", + ".wmlc": "application/vnd.wap.wmlc", + ".wmlsc": "application/vnd.wap.wmlscriptc", + ".bcpio": "application/x-bcpio", + ".torrent": "application/x-bittorrent", + ".bz2": "application/x-bzip2", + ".vcd": "application/x-cdlink", + ".pgn": "application/x-chess-pgn", + ".cpio": "application/x-cpio", + ".csh": "application/x-csh", + ".dvi": "application/x-dvi", + ".spl": "application/x-futuresplash", + ".gtar": "application/x-gtar", + ".hdf": "application/x-hdf", + ".jar": "application/x-java-archive", + ".jnlp": "application/x-java-jnlp-file", + ".js": "application/x-javascript", + ".ksp": "application/x-kspread", + ".chrt": "application/x-kchart", + ".kil": "application/x-killustrator", + ".latex": "application/x-latex", + ".rpm": "application/x-rpm", + ".sh": "application/x-sh", + ".shar": "application/x-shar", + ".swf": "application/x-shockwave-flash", + ".sit": "application/x-stuffit", + ".sv4cpio": "application/x-sv4cpio", + ".sv4crc": "application/x-sv4crc", + ".tar": "application/x-tar", + ".tcl": "application/x-tcl", + ".tex": "application/x-tex", + ".man": "application/x-troff-man", + ".me": "application/x-troff-me", + ".ms": "application/x-troff-ms", + ".ustar": "application/x-ustar", + ".src": "application/x-wais-source", + ".zip": "application/zip", + ".m3u": "audio/x-mpegurl", + ".ra": "audio/x-pn-realaudio", + ".wav": "audio/x-wav", + ".wma": "audio/x-ms-wma", + ".wax": "audio/x-ms-wax", + ".pdb": "chemical/x-pdb", + ".xyz": "chemical/x-xyz", + ".bmp": "image/bmp", + ".gif": "image/gif", + ".ief": "image/ief", + ".png": "image/png", + ".wbmp": "image/vnd.wap.wbmp", + ".ras": "image/x-cmu-raster", + ".pnm": "image/x-portable-anymap", + ".pbm": "image/x-portable-bitmap", + ".pgm": "image/x-portable-graymap", + ".ppm": "image/x-portable-pixmap", + ".rgb": "image/x-rgb", + ".xbm": "image/x-xbitmap", + ".xpm": "image/x-xpixmap", + ".xwd": "image/x-xwindowdump", + ".css": "text/css", + ".rtx": "text/richtext", + ".tsv": "text/tab-separated-values", + ".jad": "text/vnd.sun.j2me.app-descriptor", + ".wml": "text/vnd.wap.wml", + ".wmls": "text/vnd.wap.wmlscript", + ".etx": "text/x-setext", + ".mxu": "video/vnd.mpegurl", + ".flv": "video/x-flv", + ".wm": "video/x-ms-wm", + ".wmv": "video/x-ms-wmv", + ".wmx": "video/x-ms-wmx", + ".wvx": "video/x-ms-wvx", + ".avi": "video/x-msvideo", + ".movie": "video/x-sgi-movie", + ".ice": "x-conference/x-cooltalk", + ".3gp": "video/3gpp", + ".ai": "application/postscript", + ".aif": "audio/x-aiff", + ".aifc": "audio/x-aiff", + ".aiff": "audio/x-aiff", + ".asc": "text/plain", + ".atom": "application/atom+xml", + ".au": "audio/basic", + ".bin": "application/octet-stream", + ".cdf": "application/x-netcdf", + ".cgm": "image/cgm", + ".class": "application/octet-stream", + ".dcr": "application/x-director", + ".dif": "video/x-dv", + ".dir": "application/x-director", + ".djv": "image/vnd.djvu", + 
".djvu": "image/vnd.djvu", + ".dll": "application/octet-stream", + ".dmg": "application/octet-stream", + ".dms": "application/octet-stream", + ".dtd": "application/xml-dtd", + ".dv": "video/x-dv", + ".dxr": "application/x-director", + ".eps": "application/postscript", + ".exe": "application/octet-stream", + ".ez": "application/andrew-inset", + ".gram": "application/srgs", + ".grxml": "application/srgs+xml", + ".gz": "application/x-gzip", + ".htm": "text/html", + ".html": "text/html", + ".ico": "image/x-icon", + ".ics": "text/calendar", + ".ifb": "text/calendar", + ".iges": "model/iges", + ".igs": "model/iges", + ".jp2": "image/jp2", + ".jpe": "image/jpeg", + ".jpeg": "image/jpeg", + ".jpg": "image/jpeg", + ".kar": "audio/midi", + ".lha": "application/octet-stream", + ".lzh": "application/octet-stream", + ".m4a": "audio/mp4a-latm", + ".m4p": "audio/mp4a-latm", + ".m4u": "video/vnd.mpegurl", + ".m4v": "video/x-m4v", + ".mac": "image/x-macpaint", + ".mathml": "application/mathml+xml", + ".mesh": "model/mesh", + ".mid": "audio/midi", + ".midi": "audio/midi", + ".mov": "video/quicktime", + ".mp2": "audio/mpeg", + ".mp3": "audio/mpeg", + ".mp4": "video/mp4", + ".mpe": "video/mpeg", + ".mpeg": "video/mpeg", + ".mpg": "video/mpeg", + ".mpga": "audio/mpeg", + ".msh": "model/mesh", + ".nc": "application/x-netcdf", + ".oda": "application/oda", + ".ogv": "video/ogv", + ".pct": "image/pict", + ".pic": "image/pict", + ".pict": "image/pict", + ".pnt": "image/x-macpaint", + ".pntg": "image/x-macpaint", + ".ps": "application/postscript", + ".qt": "video/quicktime", + ".qti": "image/x-quicktime", + ".qtif": "image/x-quicktime", + ".ram": "audio/x-pn-realaudio", + ".rdf": "application/rdf+xml", + ".rm": "application/vnd.rn-realmedia", + ".roff": "application/x-troff", + ".sgm": "text/sgml", + ".sgml": "text/sgml", + ".silo": "model/mesh", + ".skd": "application/x-koan", + ".skm": "application/x-koan", + ".skp": "application/x-koan", + ".skt": "application/x-koan", + ".smi": "application/smil", + ".smil": "application/smil", + ".snd": "audio/basic", + ".so": "application/octet-stream", + ".svg": "image/svg+xml", + ".t": "application/x-troff", + ".texi": "application/x-texinfo", + ".texinfo": "application/x-texinfo", + ".tif": "image/tiff", + ".tiff": "image/tiff", + ".tr": "application/x-troff", + ".txt": "text/plain", + ".vrml": "model/vrml", + ".vxml": "application/voicexml+xml", + ".webm": "video/webm", + ".wrl": "model/vrml", + ".xht": "application/xhtml+xml", + ".xhtml": "application/xhtml+xml", + ".xml": "application/xml", + ".xsl": "application/xml", + ".xslt": "application/xslt+xml", + ".xul": "application/vnd.mozilla.xul+xml", + ".webp": "image/webp", + ".323": "text/h323", + ".aab": "application/x-authoware-bin", + ".aam": "application/x-authoware-map", + ".aas": "application/x-authoware-seg", + ".acx": "application/internet-property-stream", + ".als": "audio/X-Alpha5", + ".amc": "application/x-mpeg", + ".ani": "application/octet-stream", + ".asd": "application/astound", + ".asf": "video/x-ms-asf", + ".asn": "application/astound", + ".asp": "application/x-asap", + ".asr": "video/x-ms-asf", + ".asx": "video/x-ms-asf", + ".avb": "application/octet-stream", + ".awb": "audio/amr-wb", + ".axs": "application/olescript", + ".bas": "text/plain", + ".bin ": "application/octet-stream", + ".bld": "application/bld", + ".bld2": "application/bld2", + ".bpk": "application/octet-stream", + ".c": "text/plain", + ".cal": "image/x-cals", + ".cat": "application/vnd.ms-pkiseccat", + ".ccn": "application/x-cnc", + 
".cco": "application/x-cocoa", + ".cer": "application/x-x509-ca-cert", + ".cgi": "magnus-internal/cgi", + ".chat": "application/x-chat", + ".clp": "application/x-msclip", + ".cmx": "image/x-cmx", + ".co": "application/x-cult3d-object", + ".cod": "image/cis-cod", + ".conf": "text/plain", + ".cpp": "text/plain", + ".crd": "application/x-mscardfile", + ".crl": "application/pkix-crl", + ".crt": "application/x-x509-ca-cert", + ".csm": "chemical/x-csml", + ".csml": "chemical/x-csml", + ".cur": "application/octet-stream", + ".dcm": "x-lml/x-evm", + ".dcx": "image/x-dcx", + ".der": "application/x-x509-ca-cert", + ".dhtml": "text/html", + ".dot": "application/msword", + ".dwf": "drawing/x-dwf", + ".dwg": "application/x-autocad", + ".dxf": "application/x-autocad", + ".ebk": "application/x-expandedbook", + ".emb": "chemical/x-embl-dl-nucleotide", + ".embl": "chemical/x-embl-dl-nucleotide", + ".epub": "application/epub+zip", + ".eri": "image/x-eri", + ".es": "audio/echospeech", + ".esl": "audio/echospeech", + ".etc": "application/x-earthtime", + ".evm": "x-lml/x-evm", + ".evy": "application/envoy", + ".fh4": "image/x-freehand", + ".fh5": "image/x-freehand", + ".fhc": "image/x-freehand", + ".fif": "application/fractals", + ".flr": "x-world/x-vrml", + ".fm": "application/x-maker", + ".fpx": "image/x-fpx", + ".fvi": "video/isivideo", + ".gau": "chemical/x-gaussian-input", + ".gca": "application/x-gca-compressed", + ".gdb": "x-lml/x-gdb", + ".gps": "application/x-gps", + ".h": "text/plain", + ".hdm": "text/x-hdml", + ".hdml": "text/x-hdml", + ".hlp": "application/winhlp", + ".hta": "application/hta", + ".htc": "text/x-component", + ".hts": "text/html", + ".htt": "text/webviewhtml", + ".ifm": "image/gif", + ".ifs": "image/ifs", + ".iii": "application/x-iphone", + ".imy": "audio/melody", + ".ins": "application/x-internet-signup", + ".ips": "application/x-ipscript", + ".ipx": "application/x-ipix", + ".isp": "application/x-internet-signup", + ".it": "audio/x-mod", + ".itz": "audio/x-mod", + ".ivr": "i-world/i-vrml", + ".j2k": "image/j2k", + ".jam": "application/x-jam", + ".java": "text/plain", + ".jfif": "image/pipeg", + ".jpz": "image/jpeg", + ".jwc": "application/jwc", + ".kjx": "application/x-kjx", + ".lak": "x-lml/x-lak", + ".lcc": "application/fastman", + ".lcl": "application/x-digitalloca", + ".lcr": "application/x-digitalloca", + ".lgh": "application/lgh", + ".lml": "x-lml/x-lml", + ".lmlpack": "x-lml/x-lmlpack", + ".log": "text/plain", + ".lsf": "video/x-la-asf", + ".lsx": "video/x-la-asf", + ".m13": "application/x-msmediaview", + ".m14": "application/x-msmediaview", + ".m15": "audio/x-mod", + ".m3url": "audio/x-mpegurl", + ".m4b": "audio/mp4a-latm", + ".ma1": "audio/ma1", + ".ma2": "audio/ma2", + ".ma3": "audio/ma3", + ".ma5": "audio/ma5", + ".map": "magnus-internal/imagemap", + ".mbd": "application/mbedlet", + ".mct": "application/x-mascot", + ".mdb": "application/x-msaccess", + ".mdz": "audio/x-mod", + ".mel": "text/x-vmel", + ".mht": "message/rfc822", + ".mhtml": "message/rfc822", + ".mi": "application/x-mif", + ".mil": "image/x-cals", + ".mio": "audio/x-mio", + ".mmf": "application/x-skt-lbs", + ".mng": "video/x-mng", + ".mny": "application/x-msmoney", + ".moc": "application/x-mocha", + ".mocha": "application/x-mocha", + ".mod": "audio/x-mod", + ".mof": "application/x-yumekara", + ".mol": "chemical/x-mdl-molfile", + ".mop": "chemical/x-mopac-input", + ".mpa": "video/mpeg", + ".mpc": "application/vnd.mpohun.certificate", + ".mpg4": "video/mp4", + ".mpn": "application/vnd.mophun.application", + 
".mpp": "application/vnd.ms-project", + ".mps": "application/x-mapserver", + ".mpv2": "video/mpeg", + ".mrl": "text/x-mrml", + ".mrm": "application/x-mrm", + ".msg": "application/vnd.ms-outlook", + ".mts": "application/metastream", + ".mtx": "application/metastream", + ".mtz": "application/metastream", + ".mvb": "application/x-msmediaview", + ".mzv": "application/metastream", + ".nar": "application/zip", + ".nbmp": "image/nbmp", + ".ndb": "x-lml/x-ndb", + ".ndwn": "application/ndwn", + ".nif": "application/x-nif", + ".nmz": "application/x-scream", + ".nokia-op-logo": "image/vnd.nok-oplogo-color", + ".npx": "application/x-netfpx", + ".nsnd": "audio/nsnd", + ".nva": "application/x-neva1", + ".nws": "message/rfc822", + ".oom": "application/x-AtlasMate-Plugin", + ".p10": "application/pkcs10", + ".p12": "application/x-pkcs12", + ".p7b": "application/x-pkcs7-certificates", + ".p7c": "application/x-pkcs7-mime", + ".p7m": "application/x-pkcs7-mime", + ".p7r": "application/x-pkcs7-certreqresp", + ".p7s": "application/x-pkcs7-signature", + ".pac": "audio/x-pac", + ".pae": "audio/x-epac", + ".pan": "application/x-pan", + ".pcx": "image/x-pcx", + ".pda": "image/x-pda", + ".pfr": "application/font-tdpfr", + ".pfx": "application/x-pkcs12", + ".pko": "application/ynd.ms-pkipko", + ".pm": "application/x-perl", + ".pma": "application/x-perfmon", + ".pmc": "application/x-perfmon", + ".pmd": "application/x-pmd", + ".pml": "application/x-perfmon", + ".pmr": "application/x-perfmon", + ".pmw": "application/x-perfmon", + ".pnz": "image/png", + ".pot,": "application/vnd.ms-powerpoint", + ".pps": "application/vnd.ms-powerpoint", + ".pqf": "application/x-cprplayer", + ".pqi": "application/cprplayer", + ".prc": "application/x-prc", + ".prf": "application/pics-rules", + ".prop": "text/plain", + ".proxy": "application/x-ns-proxy-autoconfig", + ".ptlk": "application/listenup", + ".pub": "application/x-mspublisher", + ".pvx": "video/x-pv-pvx", + ".qcp": "audio/vnd.qcelp", + ".r3t": "text/vnd.rn-realtext3d", + ".rar": "application/octet-stream", + ".rc": "text/plain", + ".rf": "image/vnd.rn-realflash", + ".rlf": "application/x-richlink", + ".rmf": "audio/x-rmf", + ".rmi": "audio/mid", + ".rmm": "audio/x-pn-realaudio", + ".rmvb": "audio/x-pn-realaudio", + ".rnx": "application/vnd.rn-realplayer", + ".rp": "image/vnd.rn-realpix", + ".rt": "text/vnd.rn-realtext", + ".rte": "x-lml/x-gps", + ".rtg": "application/metastream", + ".rv": "video/vnd.rn-realvideo", + ".rwc": "application/x-rogerwilco", + ".s3m": "audio/x-mod", + ".s3z": "audio/x-mod", + ".sca": "application/x-supercard", + ".scd": "application/x-msschedule", + ".sct": "text/scriptlet", + ".sdf": "application/e-score", + ".sea": "application/x-stuffit", + ".setpay": "application/set-payment-initiation", + ".setreg": "application/set-registration-initiation", + ".shtml": "text/html", + ".shtm": "text/html", + ".shw": "application/presentations", + ".si6": "image/si6", + ".si7": "image/vnd.stiwap.sis", + ".si9": "image/vnd.lgtwap.sis", + ".slc": "application/x-salsa", + ".smd": "audio/x-smd", + ".smp": "application/studiom", + ".smz": "audio/x-smd", + ".spc": "application/x-pkcs7-certificates", + ".spr": "application/x-sprite", + ".sprite": "application/x-sprite", + ".sdp": "application/sdp", + ".spt": "application/x-spt", + ".sst": "application/vnd.ms-pkicertstore", + ".stk": "application/hyperstudio", + ".stl": "application/vnd.ms-pkistl", + ".stm": "text/html", + ".svf": "image/vnd", + ".svh": "image/svh", + ".svr": "x-world/x-svr", + ".swfl": 
"application/x-shockwave-flash", + ".tad": "application/octet-stream", + ".talk": "text/x-speech", + ".taz": "application/x-tar", + ".tbp": "application/x-timbuktu", + ".tbt": "application/x-timbuktu", + ".tgz": "application/x-compressed", + ".thm": "application/vnd.eri.thm", + ".tki": "application/x-tkined", + ".tkined": "application/x-tkined", + ".toc": "application/toc", + ".toy": "image/toy", + ".trk": "x-lml/x-gps", + ".trm": "application/x-msterminal", + ".tsi": "audio/tsplayer", + ".tsp": "application/dsptype", + ".ttf": "application/octet-stream", + ".ttz": "application/t-time", + ".uls": "text/iuls", + ".ult": "audio/x-mod", + ".uu": "application/x-uuencode", + ".uue": "application/x-uuencode", + ".vcf": "text/x-vcard", + ".vdo": "video/vdo", + ".vib": "audio/vib", + ".viv": "video/vivo", + ".vivo": "video/vivo", + ".vmd": "application/vocaltec-media-desc", + ".vmf": "application/vocaltec-media-file", + ".vmi": "application/x-dreamcast-vms-info", + ".vms": "application/x-dreamcast-vms", + ".vox": "audio/voxware", + ".vqe": "audio/x-twinvq-plugin", + ".vqf": "audio/x-twinvq", + ".vql": "audio/x-twinvq", + ".vre": "x-world/x-vream", + ".vrt": "x-world/x-vrt", + ".vrw": "x-world/x-vream", + ".vts": "workbook/formulaone", + ".wcm": "application/vnd.ms-works", + ".wdb": "application/vnd.ms-works", + ".web": "application/vnd.xara", + ".wi": "image/wavelet", + ".wis": "application/x-InstallShield", + ".wks": "application/vnd.ms-works", + ".wmd": "application/x-ms-wmd", + ".wmf": "application/x-msmetafile", + ".wmlscript": "text/vnd.wap.wmlscript", + ".wmz": "application/x-ms-wmz", + ".wpng": "image/x-up-wpng", + ".wps": "application/vnd.ms-works", + ".wpt": "x-lml/x-gps", + ".wri": "application/x-mswrite", + ".wrz": "x-world/x-vrml", + ".ws": "text/vnd.wap.wmlscript", + ".wsc": "application/vnd.wap.wmlscriptc", + ".wv": "video/wavelet", + ".wxl": "application/x-wxl", + ".x-gzip": "application/x-gzip", + ".xaf": "x-world/x-vrml", + ".xar": "application/vnd.xara", + ".xdm": "application/x-xdma", + ".xdma": "application/x-xdma", + ".xdw": "application/vnd.fujixerox.docuworks", + ".xhtm": "application/xhtml+xml", + ".xla": "application/vnd.ms-excel", + ".xlc": "application/vnd.ms-excel", + ".xll": "application/x-excel", + ".xlm": "application/vnd.ms-excel", + ".xlt": "application/vnd.ms-excel", + ".xlw": "application/vnd.ms-excel", + ".xm": "audio/x-mod", + ".xmz": "audio/x-mod", + ".xof": "x-world/x-vrml", + ".xpi": "application/x-xpinstall", + ".xsit": "text/xml", + ".yz1": "application/x-yz1", + ".z": "application/x-compress", + ".zac": "application/x-zaurus-zac", + ".json": "application/json", +} + +// TypeByExtension returns the MIME type associated with the file extension ext. 
+// The result fills the HTTP Content-Type header when uploading.
+func TypeByExtension(filePath string) string {
+	typ := mime.TypeByExtension(path.Ext(filePath))
+	if typ == "" {
+		typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
+	}
+	return typ
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
new file mode 100644
index 0000000000..b0b4a50271
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
@@ -0,0 +1,69 @@
+package oss
+
+import (
+	"hash"
+	"io"
+	"net/http"
+)
+
+// Response defines HTTP response from OSS
+type Response struct {
+	StatusCode int
+	Headers    http.Header
+	Body       io.ReadCloser
+	ClientCRC  uint64
+	ServerCRC  uint64
+}
+
+// Read reads from the response body
+func (r *Response) Read(p []byte) (n int, err error) {
+	return r.Body.Read(p)
+}
+
+// Close closes the http response body
+func (r *Response) Close() error {
+	return r.Body.Close()
+}
+
+// PutObjectRequest is the request of DoPutObject
+type PutObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+}
+
+// GetObjectRequest is the request of DoGetObject
+type GetObjectRequest struct {
+	ObjectKey string
+}
+
+// GetObjectResult is the result of DoGetObject
+type GetObjectResult struct {
+	Response  *Response
+	ClientCRC hash.Hash64
+	ServerCRC uint64
+}
+
+// AppendObjectRequest is the request of DoAppendObject
+type AppendObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+	Position  int64
+}
+
+// AppendObjectResult is the result of DoAppendObject
+type AppendObjectResult struct {
+	NextPosition int64
+	CRC          uint64
+}
+
+// UploadPartRequest is the request of DoUploadPart
+type UploadPartRequest struct {
+	InitResult *InitiateMultipartUploadResult
+	Reader     io.Reader
+	PartSize   int64
+	PartNumber int
+}
+
+// UploadPartResult is the result of DoUploadPart
+type UploadPartResult struct {
+	Part UploadPart
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
new file mode 100644
index 0000000000..56ed8cadfd
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
@@ -0,0 +1,474 @@
+package oss
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strconv"
+)
+
+// CopyFile copies an object using multipart copy
+//
+// srcBucketName source bucket name
+// srcObjectKey source object name
+// destObjectKey target object name in the form of bucketname.objectkey
+// partSize the part size in bytes.
+// options object's constraints. Check out function InitiateMultipartUpload.
+//
+// error it's nil if the operation succeeds, otherwise it's an error object.
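+// A minimal usage sketch (editor's illustration; names are hypothetical):
+//
+//	err := bucket.CopyFile("src-bucket", "src/key.log", "dest/key.log",
+//		100*1024*1024, oss.Routines(3))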
+// +func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error { + destBucketName := bucket.BucketName + if partSize < MinPartSize || partSize > MaxPartSize { + return errors.New("oss: part size invalid range (1024KB, 5GB]") + } + + cpConf := getCpConfig(options) + routines := getRoutines(options) + + var strVersionId string + versionId, _ := FindOption(options, "versionId", nil) + if versionId != nil { + strVersionId = versionId.(string) + } + + if cpConf != nil && cpConf.IsEnable { + cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey, strVersionId) + if cpFilePath != "" { + return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines) + } + } + + return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey, + partSize, options, routines) +} + +func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject, versionId string) string { + if cpConf.FilePath == "" && cpConf.DirPath != "" { + dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject) + src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject) + cpFileName := getCpFileName(src, dest, versionId) + cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName + } + return cpConf.FilePath +} + +// ----- Concurrently copy without checkpoint --------- + +// copyWorkerArg defines the copy worker arguments +type copyWorkerArg struct { + bucket *Bucket + imur InitiateMultipartUploadResult + srcBucketName string + srcObjectKey string + options []Option + hook copyPartHook +} + +// copyPartHook is the hook for testing purpose +type copyPartHook func(part copyPart) error + +var copyPartHooker copyPartHook = defaultCopyPartHook + +func defaultCopyPartHook(part copyPart) error { + return nil +} + +// copyWorker copies worker +func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) { + for chunk := range jobs { + if err := arg.hook(chunk); err != nil { + failed <- err + break + } + chunkSize := chunk.End - chunk.Start + 1 + part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey, + chunk.Start, chunkSize, chunk.Number, arg.options...) + if err != nil { + failed <- err + break + } + select { + case <-die: + return + default: + } + results <- part + } +} + +// copyScheduler +func copyScheduler(jobs chan copyPart, parts []copyPart) { + for _, part := range parts { + jobs <- part + } + close(jobs) +} + +// copyPart structure +type copyPart struct { + Number int // Part number (from 1 to 10,000) + Start int64 // The start index in the source file. 
+ End int64 // The end index in the source file +} + +// getCopyParts calculates copy parts +func getCopyParts(objectSize, partSize int64) []copyPart { + parts := []copyPart{} + part := copyPart{} + i := 0 + for offset := int64(0); offset < objectSize; offset += partSize { + part.Number = i + 1 + part.Start = offset + part.End = GetPartEnd(offset, objectSize, partSize) + parts = append(parts, part) + i++ + } + return parts +} + +// getSrcObjectBytes gets the source file size +func getSrcObjectBytes(parts []copyPart) int64 { + var ob int64 + for _, part := range parts { + ob += (part.End - part.Start + 1) + } + return ob +} + +// copyFile is a concurrently copy without checkpoint +func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string, + partSize int64, options []Option, routines int) error { + descBucket, err := bucket.Client.Bucket(destBucketName) + srcBucket, err := bucket.Client.Bucket(srcBucketName) + listener := GetProgressListener(options) + + // choice valid options + headerOptions := ChoiceHeadObjectOption(options) + partOptions := ChoiceTransferPartOption(options) + completeOptions := ChoiceCompletePartOption(options) + abortOptions := ChoiceAbortPartOption(options) + + meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...) + if err != nil { + return err + } + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) + if err != nil { + return err + } + + // Get copy parts + parts := getCopyParts(objectSize, partSize) + // Initialize the multipart upload + imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...) + if err != nil { + return err + } + + jobs := make(chan copyPart, len(parts)) + results := make(chan UploadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + var completedBytes int64 + totalBytes := getSrcObjectBytes(parts) + event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0) + publishProgress(listener, event) + + // Start to copy workers + arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker} + for w := 1; w <= routines; w++ { + go copyWorker(w, arg, jobs, results, failed, die) + } + + // Start the scheduler + go copyScheduler(jobs, parts) + + // Wait for the parts finished. + completed := 0 + ups := make([]UploadPart, len(parts)) + for completed < len(parts) { + select { + case part := <-results: + completed++ + ups[part.PartNumber-1] = part + copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1) + completedBytes += copyBytes + event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, copyBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + descBucket.AbortMultipartUpload(imur, abortOptions...) + event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + + // Complete the multipart upload + _, err = descBucket.CompleteMultipartUpload(imur, ups, completeOptions...) + if err != nil { + bucket.AbortMultipartUpload(imur, abortOptions...) 
+        return err
+    }
+    return nil
+}
+
+// ----- Concurrently copy with checkpoint -----
+
+const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
+
+type copyCheckpoint struct {
+    Magic string // Magic
+    MD5 string // CP content MD5
+    SrcBucketName string // Source bucket
+    SrcObjectKey string // Source object
+    DestBucketName string // Target bucket
+    DestObjectKey string // Target object
+    CopyID string // Copy ID
+    ObjStat objectStat // Object stat
+    Parts []copyPart // Copy parts
+    CopyParts []UploadPart // The uploaded parts
+    PartStat []bool // The part status
+}
+
+// isValid checks that the checkpoint data is valid and the object has not been updated.
+func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
+    // Compare CP's magic number and the MD5.
+    cpb := cp
+    cpb.MD5 = ""
+    js, _ := json.Marshal(cpb)
+    sum := md5.Sum(js)
+    b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+    if cp.Magic != copyCpMagic || b64 != cp.MD5 {
+        return false, nil
+    }
+
+    objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
+    if err != nil {
+        return false, err
+    }
+
+    // Compare the object size and last modified time and etag.
+    if cp.ObjStat.Size != objectSize ||
+        cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
+        cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
+        return false, nil
+    }
+
+    return true, nil
+}
+
+// load loads from the checkpoint file
+func (cp *copyCheckpoint) load(filePath string) error {
+    contents, err := ioutil.ReadFile(filePath)
+    if err != nil {
+        return err
+    }
+
+    err = json.Unmarshal(contents, cp)
+    return err
+}
+
+// update updates the parts status
+func (cp *copyCheckpoint) update(part UploadPart) {
+    cp.CopyParts[part.PartNumber-1] = part
+    cp.PartStat[part.PartNumber-1] = true
+}
+
+// dump dumps the CP to the file
+func (cp *copyCheckpoint) dump(filePath string) error {
+    bcp := *cp
+
+    // Calculate MD5
+    bcp.MD5 = ""
+    js, err := json.Marshal(bcp)
+    if err != nil {
+        return err
+    }
+    sum := md5.Sum(js)
+    b64 := base64.StdEncoding.EncodeToString(sum[:])
+    bcp.MD5 = b64
+
+    // Serialization
+    js, err = json.Marshal(bcp)
+    if err != nil {
+        return err
+    }
+
+    // Dump
+    return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// todoParts returns unfinished parts
+func (cp copyCheckpoint) todoParts() []copyPart {
+    dps := []copyPart{}
+    for i, ps := range cp.PartStat {
+        if !ps {
+            dps = append(dps, cp.Parts[i])
+        }
+    }
+    return dps
+}
+
+// getCompletedBytes returns finished bytes count
+func (cp copyCheckpoint) getCompletedBytes() int64 {
+    var completedBytes int64
+    for i, part := range cp.Parts {
+        if cp.PartStat[i] {
+            completedBytes += (part.End - part.Start + 1)
+        }
+    }
+    return completedBytes
+}
+
+// prepare initializes the multipart upload
+func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
+    partSize int64, options []Option) error {
+    // CP
+    cp.Magic = copyCpMagic
+    cp.SrcBucketName = srcBucket.BucketName
+    cp.SrcObjectKey = srcObjectKey
+    cp.DestBucketName = destBucket.BucketName
+    cp.DestObjectKey = destObjectKey
+
+    objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
+    if err != nil {
+        return err
+    }
+
+    cp.ObjStat.Size = objectSize
+    cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
+    cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
+
+    // Parts
+    cp.Parts = getCopyParts(objectSize, partSize)
+    cp.PartStat = make([]bool, len(cp.Parts))
+    for i := range cp.PartStat {
+        cp.PartStat[i] = false
+    }
+    cp.CopyParts = make([]UploadPart, len(cp.Parts))
+
+    // Init copy
+    imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
+    if err != nil {
+        return err
+    }
+    cp.CopyID = imur.UploadID
+
+    return nil
+}
+
+func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
+    imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
+        Key: cp.DestObjectKey, UploadID: cp.CopyID}
+    _, err := bucket.CompleteMultipartUpload(imur, parts, options...)
+    if err != nil {
+        return err
+    }
+    os.Remove(cpFilePath)
+    return err
+}
+
+// copyFileWithCp is a concurrent copy with checkpoint
+func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
+    partSize int64, options []Option, cpFilePath string, routines int) error {
+    descBucket, err := bucket.Client.Bucket(destBucketName)
+    srcBucket, err := bucket.Client.Bucket(srcBucketName)
+    listener := GetProgressListener(options)
+
+    // Load CP data
+    ccp := copyCheckpoint{}
+    err = ccp.load(cpFilePath)
+    if err != nil {
+        os.Remove(cpFilePath)
+    }
+
+    // choose valid options
+    headerOptions := ChoiceHeadObjectOption(options)
+    partOptions := ChoiceTransferPartOption(options)
+    completeOptions := ChoiceCompletePartOption(options)
+
+    meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
+    if err != nil {
+        return err
+    }
+
+    // Load failed or the CP data is invalid -- reinitialize
+    valid, err := ccp.isValid(meta)
+    if err != nil || !valid {
+        if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
+            return err
+        }
+        os.Remove(cpFilePath)
+    }
+
+    // Unfinished parts
+    parts := ccp.todoParts()
+    imur := InitiateMultipartUploadResult{
+        Bucket: destBucketName,
+        Key: destObjectKey,
+        UploadID: ccp.CopyID}
+
+    jobs := make(chan copyPart, len(parts))
+    results := make(chan UploadPart, len(parts))
+    failed := make(chan error)
+    die := make(chan bool)
+
+    completedBytes := ccp.getCompletedBytes()
+    event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size, 0)
+    publishProgress(listener, event)
+
+    // Start the worker goroutines
+    arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
+    for w := 1; w <= routines; w++ {
+        go copyWorker(w, arg, jobs, results, failed, die)
+    }
+
+    // Start the scheduler
+    go copyScheduler(jobs, parts)
+
+    // Wait for the parts to complete.
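The checkpoint machinery above persists per-part progress so an interrupted copy can resume from the remaining todo parts. From the caller's side it is switched on with the Checkpoint option (defined later in option.go); a sketch, with an illustrative checkpoint path:

```go
package osssketch

import "github.com/aliyun/aliyun-oss-go-sdk/oss"

// resumableCopy can be rerun safely: per-part progress is dumped to the
// .cp file after each completed part, and a restart copies only the
// parts the checkpoint records as unfinished.
func resumableCopy(bucket *oss.Bucket) error {
	return bucket.CopyFile("src-bucket", "big.log", "copy-of-big.log",
		5*1024*1024,
		oss.Checkpoint(true, "/tmp/copy-big.cp"), // illustrative path
		oss.Routines(3))
}
```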
+ completed := 0 + for completed < len(parts) { + select { + case part := <-results: + completed++ + ccp.update(part) + ccp.dump(cpFilePath) + copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1) + completedBytes += copyBytes + event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size, copyBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size, 0) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size, 0) + publishProgress(listener, event) + + return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, completeOptions) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go new file mode 100644 index 0000000000..9e71419712 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go @@ -0,0 +1,305 @@ +package oss + +import ( + "bytes" + "encoding/xml" + "io" + "net/http" + "net/url" + "os" + "sort" + "strconv" +) + +// InitiateMultipartUpload initializes multipart upload +// +// objectKey object name +// options the object constricts for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires, +// ServerSideEncryption, Meta, check out the following link: +// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html +// +// InitiateMultipartUploadResult the return value of the InitiateMultipartUpload, which is used for calls later on such as UploadPartFromFile,UploadPartCopy. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) { + var imur InitiateMultipartUploadResult + opts := AddContentType(options, objectKey) + params, _ := GetRawParams(options) + paramKeys := []string{"sequential", "withHashContext", "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256"} + ConvertEmptyValueToNil(params, paramKeys) + params["uploads"] = nil + + resp, err := bucket.do("POST", objectKey, params, opts, nil, nil) + if err != nil { + return imur, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &imur) + return imur, err +} + +// UploadPart uploads parts +// +// After initializing a Multipart Upload, the upload Id and object key could be used for uploading the parts. +// Each part has its part number (ranges from 1 to 10,000). And for each upload Id, the part number identifies the position of the part in the whole file. +// And thus with the same part number and upload Id, another part upload will overwrite the data. +// Except the last one, minimal part size is 100KB. There's no limit on the last part size. +// +// imur the returned value of InitiateMultipartUpload. +// reader io.Reader the reader for the part's data. +// size the part size. +// partNumber the part number (ranges from 1 to 10,000). Invalid part number will lead to InvalidArgument error. +// +// UploadPart the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. 
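For callers that want manual control rather than a high-level helper, the Initiate/UploadPart/Complete trio documented above composes directly. A sketch with toy chunk sizes (a real upload must keep every part except the last at or above the 100KB minimum noted above); the object name is illustrative:

```go
package osssketch

import (
	"strings"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// manualMultipart uploads two in-memory chunks as numbered parts and
// completes the upload; on any part failure it aborts so OSS does not
// keep orphaned parts around.
func manualMultipart(bucket *oss.Bucket) error {
	imur, err := bucket.InitiateMultipartUpload("manual.txt")
	if err != nil {
		return err
	}
	var parts []oss.UploadPart
	for i, chunk := range []string{"hello ", "world"} {
		r := strings.NewReader(chunk)
		part, err := bucket.UploadPart(imur, r, int64(r.Len()), i+1)
		if err != nil {
			bucket.AbortMultipartUpload(imur)
			return err
		}
		parts = append(parts, part)
	}
	_, err = bucket.CompleteMultipartUpload(imur, parts)
	return err
}
```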
+// +func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader, + partSize int64, partNumber int, options ...Option) (UploadPart, error) { + request := &UploadPartRequest{ + InitResult: &imur, + Reader: reader, + PartSize: partSize, + PartNumber: partNumber, + } + + result, err := bucket.DoUploadPart(request, options) + + return result.Part, err +} + +// UploadPartFromFile uploads part from the file. +// +// imur the return value of a successful InitiateMultipartUpload. +// filePath the local file path to upload. +// startPosition the start position in the local file. +// partSize the part size. +// partNumber the part number (from 1 to 10,000) +// +// UploadPart the return value consists of PartNumber and ETag. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string, + startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) { + var part = UploadPart{} + fd, err := os.Open(filePath) + if err != nil { + return part, err + } + defer fd.Close() + fd.Seek(startPosition, os.SEEK_SET) + + request := &UploadPartRequest{ + InitResult: &imur, + Reader: fd, + PartSize: partSize, + PartNumber: partNumber, + } + + result, err := bucket.DoUploadPart(request, options) + + return result.Part, err +} + +// DoUploadPart does the actual part upload. +// +// request part upload request +// +// UploadPartResult the result of uploading part. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) { + listener := GetProgressListener(options) + options = append(options, ContentLength(request.PartSize)) + params := map[string]interface{}{} + params["partNumber"] = strconv.Itoa(request.PartNumber) + params["uploadId"] = request.InitResult.UploadID + resp, err := bucket.do("PUT", request.InitResult.Key, params, options, + &io.LimitedReader{R: request.Reader, N: request.PartSize}, listener) + if err != nil { + return &UploadPartResult{}, err + } + defer resp.Body.Close() + + part := UploadPart{ + ETag: resp.Headers.Get(HTTPHeaderEtag), + PartNumber: request.PartNumber, + } + + if bucket.GetConfig().IsEnableCRC { + err = CheckCRC(resp, "DoUploadPart") + if err != nil { + return &UploadPartResult{part}, err + } + } + + return &UploadPartResult{part}, nil +} + +// UploadPartCopy uploads part copy +// +// imur the return value of InitiateMultipartUpload +// copySrc source Object name +// startPosition the part's start index in the source file +// partSize the part size +// partNumber the part number, ranges from 1 to 10,000. If it exceeds the range OSS returns InvalidArgument error. +// options the constraints of source object for the copy. The copy happens only when these contraints are met. Otherwise it returns error. +// CopySourceIfNoneMatch, CopySourceIfModifiedSince CopySourceIfUnmodifiedSince, check out the following link for the detail +// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html +// +// UploadPart the return value consists of PartNumber and ETag. +// error it's nil if the operation succeeds, otherwise it's an error object. 
+// +func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string, + startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) { + var out UploadPartCopyResult + var part UploadPart + var opts []Option + + //first find version id + versionIdKey := "versionId" + versionId, _ := FindOption(options, versionIdKey, nil) + if versionId == nil { + opts = []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)), + CopySourceRange(startPosition, partSize)} + } else { + opts = []Option{CopySourceVersion(srcBucketName, url.QueryEscape(srcObjectKey), versionId.(string)), + CopySourceRange(startPosition, partSize)} + options = DeleteOption(options, versionIdKey) + } + + opts = append(opts, options...) + + params := map[string]interface{}{} + params["partNumber"] = strconv.Itoa(partNumber) + params["uploadId"] = imur.UploadID + resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil) + if err != nil { + return part, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return part, err + } + part.ETag = out.ETag + part.PartNumber = partNumber + + return part, nil +} + +// CompleteMultipartUpload completes the multipart upload. +// +// imur the return value of InitiateMultipartUpload. +// parts the array of return value of UploadPart/UploadPartFromFile/UploadPartCopy. +// +// CompleteMultipartUploadResponse the return value when the call succeeds. Only valid when the error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult, + parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) { + var out CompleteMultipartUploadResult + + sort.Sort(UploadParts(parts)) + cxml := completeMultipartUploadXML{} + cxml.Part = parts + bs, err := xml.Marshal(cxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// AbortMultipartUpload aborts the multipart upload. +// +// imur the return value of InitiateMultipartUpload. +// +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error { + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// ListUploadedParts lists the uploaded parts. +// +// imur the return value of InitiateMultipartUpload. +// +// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. 
+// +func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) { + var out ListUploadedPartsResult + options = append(options, EncodingType("url")) + + params := map[string]interface{}{} + params, err := GetRawParams(options) + if err != nil { + return out, err + } + + params["uploadId"] = imur.UploadID + resp, err := bucket.do("GET", imur.Key, params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + err = decodeListUploadedPartsResult(&out) + return out, err +} + +// ListMultipartUploads lists all ongoing multipart upload tasks +// +// options listObject's filter. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order; +// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys. +// +// ListMultipartUploadResponse the return value if it succeeds, only valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) { + var out ListMultipartUploadResult + + options = append(options, EncodingType("url")) + params, err := GetRawParams(options) + if err != nil { + return out, err + } + params["uploads"] = nil + + resp, err := bucket.do("GET", "", params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + err = decodeListMultipartUploadResult(&out) + return out, err +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go new file mode 100644 index 0000000000..ccae9f4299 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go @@ -0,0 +1,689 @@ +package oss + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +type optionType string + +const ( + optionParam optionType = "HTTPParameter" // URL parameter + optionHTTP optionType = "HTTPHeader" // HTTP header + optionArg optionType = "FuncArgument" // Function argument +) + +const ( + deleteObjectsQuiet = "delete-objects-quiet" + routineNum = "x-routine-num" + checkpointConfig = "x-cp-config" + initCRC64 = "init-crc64" + progressListener = "x-progress-listener" + storageClass = "storage-class" + responseHeader = "x-response-header" + redundancyType = "redundancy-type" + objectHashFunc = "object-hash-func" +) + +type ( + optionValue struct { + Value interface{} + Type optionType + } + + // Option HTTP option + Option func(map[string]optionValue) error +) + +// ACL is an option to set X-Oss-Acl header +func ACL(acl ACLType) Option { + return setHeader(HTTPHeaderOssACL, string(acl)) +} + +// ContentType is an option to set Content-Type header +func ContentType(value string) Option { + return setHeader(HTTPHeaderContentType, value) +} + +// ContentLength is an option to set Content-Length header +func ContentLength(length int64) Option { + return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10)) +} + +// CacheControl is an option to set Cache-Control header +func CacheControl(value string) Option { + return setHeader(HTTPHeaderCacheControl, value) +} + +// ContentDisposition is an option to set Content-Disposition header +func ContentDisposition(value string) Option { + 
return setHeader(HTTPHeaderContentDisposition, value) +} + +// ContentEncoding is an option to set Content-Encoding header +func ContentEncoding(value string) Option { + return setHeader(HTTPHeaderContentEncoding, value) +} + +// ContentLanguage is an option to set Content-Language header +func ContentLanguage(value string) Option { + return setHeader(HTTPHeaderContentLanguage, value) +} + +// ContentMD5 is an option to set Content-MD5 header +func ContentMD5(value string) Option { + return setHeader(HTTPHeaderContentMD5, value) +} + +// Expires is an option to set Expires header +func Expires(t time.Time) Option { + return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat)) +} + +// Meta is an option to set Meta header +func Meta(key, value string) Option { + return setHeader(HTTPHeaderOssMetaPrefix+key, value) +} + +// Range is an option to set Range header, [start, end] +func Range(start, end int64) Option { + return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end)) +} + +// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048 +func NormalizedRange(nr string) Option { + return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr))) +} + +// AcceptEncoding is an option to set Accept-Encoding header +func AcceptEncoding(value string) Option { + return setHeader(HTTPHeaderAcceptEncoding, value) +} + +// IfModifiedSince is an option to set If-Modified-Since header +func IfModifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat)) +} + +// IfUnmodifiedSince is an option to set If-Unmodified-Since header +func IfUnmodifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat)) +} + +// IfMatch is an option to set If-Match header +func IfMatch(value string) Option { + return setHeader(HTTPHeaderIfMatch, value) +} + +// IfNoneMatch is an option to set IfNoneMatch header +func IfNoneMatch(value string) Option { + return setHeader(HTTPHeaderIfNoneMatch, value) +} + +// CopySource is an option to set X-Oss-Copy-Source header +func CopySource(sourceBucket, sourceObject string) Option { + return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject) +} + +// CopySourceVersion is an option to set X-Oss-Copy-Source header,include versionId +func CopySourceVersion(sourceBucket, sourceObject string, versionId string) Option { + return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject+"?"+"versionId="+versionId) +} + +// CopySourceRange is an option to set X-Oss-Copy-Source header +func CopySourceRange(startPosition, partSize int64) Option { + val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" + + strconv.FormatInt((startPosition+partSize-1), 10) + return setHeader(HTTPHeaderOssCopySourceRange, val) +} + +// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header +func CopySourceIfMatch(value string) Option { + return setHeader(HTTPHeaderOssCopySourceIfMatch, value) +} + +// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header +func CopySourceIfNoneMatch(value string) Option { + return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value) +} + +// CopySourceIfModifiedSince is an option to set X-Oss-CopySource-If-Modified-Since header +func CopySourceIfModifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat)) +} + +// CopySourceIfUnmodifiedSince is an option to set 
X-Oss-Copy-Source-If-Unmodified-Since header +func CopySourceIfUnmodifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat)) +} + +// MetadataDirective is an option to set X-Oss-Metadata-Directive header +func MetadataDirective(directive MetadataDirectiveType) Option { + return setHeader(HTTPHeaderOssMetadataDirective, string(directive)) +} + +// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header +func ServerSideEncryption(value string) Option { + return setHeader(HTTPHeaderOssServerSideEncryption, value) +} + +// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header +func ServerSideEncryptionKeyID(value string) Option { + return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value) +} + +// ServerSideDataEncryption is an option to set X-Oss-Server-Side-Data-Encryption header +func ServerSideDataEncryption(value string) Option { + return setHeader(HTTPHeaderOssServerSideDataEncryption, value) +} + +// SSECAlgorithm is an option to set X-Oss-Server-Side-Encryption-Customer-Algorithm header +func SSECAlgorithm(value string) Option { + return setHeader(HTTPHeaderSSECAlgorithm, value) +} + +// SSECKey is an option to set X-Oss-Server-Side-Encryption-Customer-Key header +func SSECKey(value string) Option { + return setHeader(HTTPHeaderSSECKey, value) +} + +// SSECKeyMd5 is an option to set X-Oss-Server-Side-Encryption-Customer-Key-Md5 header +func SSECKeyMd5(value string) Option { + return setHeader(HTTPHeaderSSECKeyMd5, value) +} + +// ObjectACL is an option to set X-Oss-Object-Acl header +func ObjectACL(acl ACLType) Option { + return setHeader(HTTPHeaderOssObjectACL, string(acl)) +} + +// symlinkTarget is an option to set X-Oss-Symlink-Target +func symlinkTarget(targetObjectKey string) Option { + return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey) +} + +// Origin is an option to set Origin header +func Origin(value string) Option { + return setHeader(HTTPHeaderOrigin, value) +} + +// ObjectStorageClass is an option to set the storage class of object +func ObjectStorageClass(storageClass StorageClassType) Option { + return setHeader(HTTPHeaderOssStorageClass, string(storageClass)) +} + +// Callback is an option to set callback values +func Callback(callback string) Option { + return setHeader(HTTPHeaderOssCallback, callback) +} + +// CallbackVar is an option to set callback user defined values +func CallbackVar(callbackVar string) Option { + return setHeader(HTTPHeaderOssCallbackVar, callbackVar) +} + +// RequestPayer is an option to set payer who pay for the request +func RequestPayer(payerType PayerType) Option { + return setHeader(HTTPHeaderOssRequester, strings.ToLower(string(payerType))) +} + +// RequestPayerParam is an option to set payer who pay for the request +func RequestPayerParam(payerType PayerType) Option { + return addParam(strings.ToLower(HTTPHeaderOssRequester), strings.ToLower(string(payerType))) +} + +// SetTagging is an option to set object tagging +func SetTagging(tagging Tagging) Option { + if len(tagging.Tags) == 0 { + return nil + } + + taggingValue := "" + for index, tag := range tagging.Tags { + if index != 0 { + taggingValue += "&" + } + taggingValue += url.QueryEscape(tag.Key) + "=" + url.QueryEscape(tag.Value) + } + return setHeader(HTTPHeaderOssTagging, taggingValue) +} + +// TaggingDirective is an option to set X-Oss-Metadata-Directive header +func TaggingDirective(directive TaggingDirectiveType) Option { + return 
setHeader(HTTPHeaderOssTaggingDirective, string(directive)) +} + +// ACReqMethod is an option to set Access-Control-Request-Method header +func ACReqMethod(value string) Option { + return setHeader(HTTPHeaderACReqMethod, value) +} + +// ACReqHeaders is an option to set Access-Control-Request-Headers header +func ACReqHeaders(value string) Option { + return setHeader(HTTPHeaderACReqHeaders, value) +} + +// TrafficLimitHeader is an option to set X-Oss-Traffic-Limit +func TrafficLimitHeader(value int64) Option { + return setHeader(HTTPHeaderOssTrafficLimit, strconv.FormatInt(value, 10)) +} + +// UserAgentHeader is an option to set HTTPHeaderUserAgent +func UserAgentHeader(ua string) Option { + return setHeader(HTTPHeaderUserAgent, ua) +} + +// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite +func ForbidOverWrite(forbidWrite bool) Option { + if forbidWrite { + return setHeader(HTTPHeaderOssForbidOverWrite, "true") + } else { + return setHeader(HTTPHeaderOssForbidOverWrite, "false") + } +} + +// RangeBehavior is an option to set Range value, such as "standard" +func RangeBehavior(value string) Option { + return setHeader(HTTPHeaderOssRangeBehavior, value) +} + +func PartHashCtxHeader(value string) Option { + return setHeader(HTTPHeaderOssHashCtx, value) +} + +func PartMd5CtxHeader(value string) Option { + return setHeader(HTTPHeaderOssMd5Ctx, value) +} + +func PartHashCtxParam(value string) Option { + return addParam("x-oss-hash-ctx", value) +} + +func PartMd5CtxParam(value string) Option { + return addParam("x-oss-md5-ctx", value) +} + +// Delimiter is an option to set delimiler parameter +func Delimiter(value string) Option { + return addParam("delimiter", value) +} + +// Marker is an option to set marker parameter +func Marker(value string) Option { + return addParam("marker", value) +} + +// MaxKeys is an option to set maxkeys parameter +func MaxKeys(value int) Option { + return addParam("max-keys", strconv.Itoa(value)) +} + +// Prefix is an option to set prefix parameter +func Prefix(value string) Option { + return addParam("prefix", value) +} + +// EncodingType is an option to set encoding-type parameter +func EncodingType(value string) Option { + return addParam("encoding-type", value) +} + +// MaxUploads is an option to set max-uploads parameter +func MaxUploads(value int) Option { + return addParam("max-uploads", strconv.Itoa(value)) +} + +// KeyMarker is an option to set key-marker parameter +func KeyMarker(value string) Option { + return addParam("key-marker", value) +} + +// VersionIdMarker is an option to set version-id-marker parameter +func VersionIdMarker(value string) Option { + return addParam("version-id-marker", value) +} + +// VersionId is an option to set versionId parameter +func VersionId(value string) Option { + return addParam("versionId", value) +} + +// TagKey is an option to set tag key parameter +func TagKey(value string) Option { + return addParam("tag-key", value) +} + +// TagValue is an option to set tag value parameter +func TagValue(value string) Option { + return addParam("tag-value", value) +} + +// UploadIDMarker is an option to set upload-id-marker parameter +func UploadIDMarker(value string) Option { + return addParam("upload-id-marker", value) +} + +// MaxParts is an option to set max-parts parameter +func MaxParts(value int) Option { + return addParam("max-parts", strconv.Itoa(value)) +} + +// PartNumberMarker is an option to set part-number-marker parameter +func PartNumberMarker(value int) Option { + return addParam("part-number-marker", 
strconv.Itoa(value))
+}
+
+// Sequential is an option to set sequential parameter for InitiateMultipartUpload
+func Sequential() Option {
+    return addParam("sequential", "")
+}
+
+// WithHashContext is an option to set withHashContext parameter for InitiateMultipartUpload
+func WithHashContext() Option {
+    return addParam("withHashContext", "")
+}
+
+// EnableMd5 is an option to set x-oss-enable-md5 parameter for InitiateMultipartUpload
+func EnableMd5() Option {
+    return addParam("x-oss-enable-md5", "")
+}
+
+// EnableSha1 is an option to set x-oss-enable-sha1 parameter for InitiateMultipartUpload
+func EnableSha1() Option {
+    return addParam("x-oss-enable-sha1", "")
+}
+
+// EnableSha256 is an option to set x-oss-enable-sha256 parameter for InitiateMultipartUpload
+func EnableSha256() Option {
+    return addParam("x-oss-enable-sha256", "")
+}
+
+// ListType is an option to set List-type parameter for ListObjectsV2
+func ListType(value int) Option {
+    return addParam("list-type", strconv.Itoa(value))
+}
+
+// StartAfter is an option to set start-after parameter for ListObjectsV2
+func StartAfter(value string) Option {
+    return addParam("start-after", value)
+}
+
+// ContinuationToken is an option to set Continuation-token parameter for ListObjectsV2
+func ContinuationToken(value string) Option {
+    if value == "" {
+        return addParam("continuation-token", nil)
+    }
+    return addParam("continuation-token", value)
+}
+
+// FetchOwner is an option to set Fetch-owner parameter for ListObjectsV2
+func FetchOwner(value bool) Option {
+    if value {
+        return addParam("fetch-owner", "true")
+    }
+    return addParam("fetch-owner", "false")
+}
+
+// DeleteObjectsQuiet false: DeleteObjects in verbose mode; true: DeleteObjects in quiet mode. Default is false.
+func DeleteObjectsQuiet(isQuiet bool) Option {
+    return addArg(deleteObjectsQuiet, isQuiet)
+}
+
+// StorageClass bucket storage class
+func StorageClass(value StorageClassType) Option {
+    return addArg(storageClass, value)
+}
+
+// RedundancyType bucket data redundancy type
+func RedundancyType(value DataRedundancyType) Option {
+    return addArg(redundancyType, value)
+}
+
+// ObjectHashFunc object hash function type
+func ObjectHashFunc(value ObjecthashFuncType) Option {
+    return addArg(objectHashFunc, value)
+}
+
+// Checkpoint configuration
+type cpConfig struct {
+    IsEnable bool
+    FilePath string
+    DirPath string
+}
+
+// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
+func Checkpoint(isEnable bool, filePath string) Option {
+    return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
+}
+
+// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile.
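Checkpoint and CheckpointDir (defined next) differ only in whether the caller names the .cp file itself or just a directory in which a per-transfer file name is derived. A sketch against DownloadFile, whose signature is taken from download.go elsewhere in this patch; object names and paths are illustrative:

```go
package osssketch

import "github.com/aliyun/aliyun-oss-go-sdk/oss"

// resumableDownloads shows both ways to make a transfer resumable.
func resumableDownloads(bucket *oss.Bucket) error {
	// Explicit checkpoint file for one known transfer.
	if err := bucket.DownloadFile("big.log", "/tmp/big.log", 1024*1024,
		oss.Checkpoint(true, "/tmp/big.log.cp")); err != nil {
		return err
	}
	// Shared checkpoint directory, handy when many objects go
	// through the same code path.
	return bucket.DownloadFile("big2.log", "/tmp/big2.log", 1024*1024,
		oss.CheckpointDir(true, "/tmp/oss-cp"))
}
```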
+func CheckpointDir(isEnable bool, dirPath string) Option { + return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath}) +} + +// Routines DownloadFile/UploadFile routine count +func Routines(n int) Option { + return addArg(routineNum, n) +} + +// InitCRC Init AppendObject CRC +func InitCRC(initCRC uint64) Option { + return addArg(initCRC64, initCRC) +} + +// Progress set progress listener +func Progress(listener ProgressListener) Option { + return addArg(progressListener, listener) +} + +// GetResponseHeader for get response http header +func GetResponseHeader(respHeader *http.Header) Option { + return addArg(responseHeader, respHeader) +} + +// ResponseContentType is an option to set response-content-type param +func ResponseContentType(value string) Option { + return addParam("response-content-type", value) +} + +// ResponseContentLanguage is an option to set response-content-language param +func ResponseContentLanguage(value string) Option { + return addParam("response-content-language", value) +} + +// ResponseExpires is an option to set response-expires param +func ResponseExpires(value string) Option { + return addParam("response-expires", value) +} + +// ResponseCacheControl is an option to set response-cache-control param +func ResponseCacheControl(value string) Option { + return addParam("response-cache-control", value) +} + +// ResponseContentDisposition is an option to set response-content-disposition param +func ResponseContentDisposition(value string) Option { + return addParam("response-content-disposition", value) +} + +// ResponseContentEncoding is an option to set response-content-encoding param +func ResponseContentEncoding(value string) Option { + return addParam("response-content-encoding", value) +} + +// Process is an option to set x-oss-process param +func Process(value string) Option { + return addParam("x-oss-process", value) +} + +// TrafficLimitParam is a option to set x-oss-traffic-limit +func TrafficLimitParam(value int64) Option { + return addParam("x-oss-traffic-limit", strconv.FormatInt(value, 10)) +} + +// SetHeader Allow users to set personalized http headers +func SetHeader(key string, value interface{}) Option { + return setHeader(key, value) +} + +// AddParam Allow users to set personalized http params +func AddParam(key string, value interface{}) Option { + return addParam(key, value) +} + +func setHeader(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionHTTP} + return nil + } +} + +func addParam(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionParam} + return nil + } +} + +func addArg(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionArg} + return nil + } +} + +func handleOptions(headers map[string]string, options []Option) error { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return err + } + } + } + + for k, v := range params { + if v.Type == optionHTTP { + headers[k] = v.Value.(string) + } + } + return nil +} + +func GetRawParams(options []Option) (map[string]interface{}, error) { + // Option + params := map[string]optionValue{} + for _, option := range options { + if option != 
nil { + if err := option(params); err != nil { + return nil, err + } + } + } + + paramsm := map[string]interface{}{} + // Serialize + for k, v := range params { + if v.Type == optionParam { + vs := params[k] + paramsm[k] = vs.Value.(string) + } + } + + return paramsm, nil +} + +func FindOption(options []Option, param string, defaultVal interface{}) (interface{}, error) { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return nil, err + } + } + } + + if val, ok := params[param]; ok { + return val.Value, nil + } + return defaultVal, nil +} + +func IsOptionSet(options []Option, option string) (bool, interface{}, error) { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return false, nil, err + } + } + } + + if val, ok := params[option]; ok { + return true, val.Value, nil + } + return false, nil, nil +} + +func DeleteOption(options []Option, strKey string) []Option { + var outOption []Option + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + option(params) + _, exist := params[strKey] + if !exist { + outOption = append(outOption, option) + } else { + delete(params, strKey) + } + } + } + return outOption +} + +func GetRequestId(header http.Header) string { + return header.Get("x-oss-request-id") +} + +func GetVersionId(header http.Header) string { + return header.Get("x-oss-version-id") +} + +func GetCopySrcVersionId(header http.Header) string { + return header.Get("x-oss-copy-source-version-id") +} + +func GetDeleteMark(header http.Header) bool { + value := header.Get("x-oss-delete-marker") + if strings.ToUpper(value) == "TRUE" { + return true + } + return false +} + +func GetQosDelayTime(header http.Header) string { + return header.Get("x-oss-qos-delay-time") +} + +// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite +func AllowSameActionOverLap(enabled bool) Option { + if enabled { + return setHeader(HTTPHeaderAllowSameActionOverLap, "true") + } else { + return setHeader(HTTPHeaderAllowSameActionOverLap, "false") + } +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go new file mode 100644 index 0000000000..9f3aa9f614 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go @@ -0,0 +1,116 @@ +package oss + +import ( + "io" +) + +// ProgressEventType defines transfer progress event type +type ProgressEventType int + +const ( + // TransferStartedEvent transfer started, set TotalBytes + TransferStartedEvent ProgressEventType = 1 + iota + // TransferDataEvent transfer data, set ConsumedBytes anmd TotalBytes + TransferDataEvent + // TransferCompletedEvent transfer completed + TransferCompletedEvent + // TransferFailedEvent transfer encounters an error + TransferFailedEvent +) + +// ProgressEvent defines progress event +type ProgressEvent struct { + ConsumedBytes int64 + TotalBytes int64 + RwBytes int64 + EventType ProgressEventType +} + +// ProgressListener listens progress change +type ProgressListener interface { + ProgressChanged(event *ProgressEvent) +} + +// -------------------- Private -------------------- + +func newProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent { + return &ProgressEvent{ + ConsumedBytes: consumed, + TotalBytes: total, + RwBytes: rwBytes, + EventType: eventType} +} + +// publishProgress +func 
publishProgress(listener ProgressListener, event *ProgressEvent) { + if listener != nil && event != nil { + listener.ProgressChanged(event) + } +} + +type readerTracker struct { + completedBytes int64 +} + +type teeReader struct { + reader io.Reader + writer io.Writer + listener ProgressListener + consumedBytes int64 + totalBytes int64 + tracker *readerTracker +} + +// TeeReader returns a Reader that writes to w what it reads from r. +// All reads from r performed through it are matched with +// corresponding writes to w. There is no internal buffering - +// the write must complete before the read completes. +// Any error encountered while writing is reported as a read error. +func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser { + return &teeReader{ + reader: reader, + writer: writer, + listener: listener, + consumedBytes: 0, + totalBytes: totalBytes, + tracker: tracker, + } +} + +func (t *teeReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + + // Read encountered error + if err != nil && err != io.EOF { + event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes, 0) + publishProgress(t.listener, event) + } + + if n > 0 { + t.consumedBytes += int64(n) + // CRC + if t.writer != nil { + if n, err := t.writer.Write(p[:n]); err != nil { + return n, err + } + } + // Progress + if t.listener != nil { + event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes, int64(n)) + publishProgress(t.listener, event) + } + // Track + if t.tracker != nil { + t.tracker.completedBytes = t.consumedBytes + } + } + + return +} + +func (t *teeReader) Close() error { + if rc, ok := t.reader.(io.ReadCloser); ok { + return rc.Close() + } + return nil +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go new file mode 100644 index 0000000000..d09bc5ebd3 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go @@ -0,0 +1,11 @@ +// +build !go1.7 + +package oss + +import "net/http" + +// http.ErrUseLastResponse only is defined go1.7 onward + +func disableHTTPRedirect(client *http.Client) { + +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go new file mode 100644 index 0000000000..5b0bb8674e --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package oss + +import "net/http" + +// http.ErrUseLastResponse only is defined go1.7 onward +func disableHTTPRedirect(client *http.Client) { + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go new file mode 100644 index 0000000000..2e0da4637f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go @@ -0,0 +1,197 @@ +package oss + +import ( + "bytes" + "encoding/xml" + "hash/crc32" + "io" + "io/ioutil" + "net/http" + "os" + "strings" +) + +// CreateSelectCsvObjectMeta is Creating csv object meta +// +// key the object key. +// csvMeta the csv file meta +// options the options for create csv Meta of the object. +// +// MetaEndFrameCSV the csv file meta info +// error it's nil if no error, otherwise it's an error object. 
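Stepping back to the progress plumbing in progress.go above: any type with a ProgressChanged method satisfies ProgressListener, and the Progress option attaches it to a transfer so the TeeReader can publish per-chunk events. A sketch, with placeholder bucket and object names:

```go
package osssketch

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// percentListener prints transfer progress; it satisfies oss.ProgressListener.
type percentListener struct{}

func (percentListener) ProgressChanged(event *oss.ProgressEvent) {
	switch event.EventType {
	case oss.TransferDataEvent:
		fmt.Printf("\r%d/%d bytes", event.ConsumedBytes, event.TotalBytes)
	case oss.TransferCompletedEvent:
		fmt.Println("\ndone")
	case oss.TransferFailedEvent:
		fmt.Println("\nfailed")
	}
}

// watchCopy wires the listener into any transfer API that accepts options.
func watchCopy(bucket *oss.Bucket) error {
	return bucket.CopyFile("src-bucket", "big.log", "copy-of-big.log",
		5*1024*1024, oss.Progress(percentListener{}))
}
```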
+//
+func (bucket Bucket) CreateSelectCsvObjectMeta(key string, csvMeta CsvMetaRequest, options ...Option) (MetaEndFrameCSV, error) {
+    var endFrame MetaEndFrameCSV
+    params := map[string]interface{}{}
+    params["x-oss-process"] = "csv/meta"
+
+    csvMeta.encodeBase64()
+    bs, err := xml.Marshal(csvMeta)
+    if err != nil {
+        return endFrame, err
+    }
+    buffer := new(bytes.Buffer)
+    buffer.Write(bs)
+
+    resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
+    if err != nil {
+        return endFrame, err
+    }
+    defer resp.Body.Close()
+
+    _, err = ioutil.ReadAll(resp)
+
+    return resp.Frame.MetaEndFrameCSV, err
+}
+
+// CreateSelectJsonObjectMeta creates the json object meta
+//
+// key the object key.
+// jsonMeta the json file meta
+// options the options for creating the json meta of the object.
+//
+// MetaEndFrameJSON the json file meta info
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) CreateSelectJsonObjectMeta(key string, jsonMeta JsonMetaRequest, options ...Option) (MetaEndFrameJSON, error) {
+    var endFrame MetaEndFrameJSON
+    params := map[string]interface{}{}
+    params["x-oss-process"] = "json/meta"
+
+    bs, err := xml.Marshal(jsonMeta)
+    if err != nil {
+        return endFrame, err
+    }
+    buffer := new(bytes.Buffer)
+    buffer.Write(bs)
+
+    resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
+    if err != nil {
+        return endFrame, err
+    }
+    defer resp.Body.Close()
+
+    _, err = ioutil.ReadAll(resp)
+
+    return resp.Frame.MetaEndFrameJSON, err
+}
+
+// SelectObject is the select object api; it supports csv and json files.
+//
+// key the object key.
+// selectReq the request data for select object
+// options the options for selecting from the object.
+//
+// io.ReadCloser reader instance for reading data from the response. Close() must be called after use, and it is only valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) SelectObject(key string, selectReq SelectRequest, options ...Option) (io.ReadCloser, error) {
+    params := map[string]interface{}{}
+    if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
+        params["x-oss-process"] = "csv/select" // default select csv file
+    } else {
+        params["x-oss-process"] = "json/select"
+    }
+    selectReq.encodeBase64()
+    bs, err := xml.Marshal(selectReq)
+    if err != nil {
+        return nil, err
+    }
+    buffer := new(bytes.Buffer)
+    buffer.Write(bs)
+    resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
+    if err != nil {
+        return nil, err
+    }
+    if selectReq.OutputSerializationSelect.EnablePayloadCrc != nil && *selectReq.OutputSerializationSelect.EnablePayloadCrc == true {
+        resp.Frame.EnablePayloadCrc = true
+    }
+    resp.Frame.OutputRawData = strings.ToUpper(resp.Headers.Get("x-oss-select-output-raw")) == "TRUE"
+
+    return resp, err
+}
+
+// DoPostSelectObject is the SelectObject/CreateMeta api; it supports csv and json files.
+//
+// key the object key.
+// params the oss resource: csv/meta, json/meta, csv/select or json/select.
+// buf the request data serialized into a buffer.
+// options the options for selecting from the object.
+//
+// SelectObjectResponse the response of select object.
+// error it's nil if no error, otherwise it's an error object.
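A sketch of the SelectObject call shape documented above. The SelectRequest fields (Expression in particular) come from type.go, which is not part of this excerpt, and the SQL text and object name are illustrative; with an otherwise zero-valued request the csv/select path shown above is taken:

```go
package osssketch

import (
	"io"
	"os"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// selectCsv streams the rows matching a SQL filter out of a CSV object.
func selectCsv(bucket *oss.Bucket) error {
	req := oss.SelectRequest{
		Expression: "select _1, _3 from ossobject where _3 > 100",
	}
	body, err := bucket.SelectObject("table.csv", req)
	if err != nil {
		return err
	}
	defer body.Close()
	_, err = io.Copy(os.Stdout, body) // print the filtered rows
	return err
}
```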
+// +func (bucket Bucket) DoPostSelectObject(key string, params map[string]interface{}, buf *bytes.Buffer, options ...Option) (*SelectObjectResponse, error) { + resp, err := bucket.do("POST", key, params, options, buf, nil) + if err != nil { + return nil, err + } + + result := &SelectObjectResponse{ + Body: resp.Body, + StatusCode: resp.StatusCode, + Frame: SelectObjectResult{}, + } + result.Headers = resp.Headers + // result.Frame = SelectObjectResult{} + result.ReadTimeOut = bucket.GetConfig().Timeout + + // Progress + listener := GetProgressListener(options) + + // CRC32 + crcCalc := crc32.NewIEEE() + result.WriterForCheckCrc32 = crcCalc + result.Body = TeeReader(resp.Body, nil, 0, listener, nil) + + err = CheckRespCode(resp.StatusCode, []int{http.StatusPartialContent, http.StatusOK}) + + return result, err +} + +// SelectObjectIntoFile is the selectObject to file api +// +// key the object key. +// fileName saving file's name to localstation. +// selectReq the request data for select object +// options the options for select file of the object. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) SelectObjectIntoFile(key, fileName string, selectReq SelectRequest, options ...Option) error { + tempFilePath := fileName + TempFileSuffix + + params := map[string]interface{}{} + if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() { + params["x-oss-process"] = "csv/select" // default select csv file + } else { + params["x-oss-process"] = "json/select" + } + selectReq.encodeBase64() + bs, err := xml.Marshal(selectReq) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + resp, err := bucket.DoPostSelectObject(key, params, buffer, options...) + if err != nil { + return err + } + defer resp.Close() + + // If the local file does not exist, create a new one. If it exists, overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode) + if err != nil { + return err + } + + // Copy the data to the local file path. + _, err = io.Copy(fd, resp) + fd.Close() + if err != nil { + return err + } + + return os.Rename(tempFilePath, fileName) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go new file mode 100644 index 0000000000..8b75782f35 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go @@ -0,0 +1,364 @@ +package oss + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash" + "hash/crc32" + "io" + "net/http" + "time" +) + +// The adapter class for Select object's response. +// The response consists of frames. Each frame has the following format: + +// Type | Payload Length | Header Checksum | Payload | Payload Checksum + +// |<4-->| <--4 bytes------><---4 bytes-------><-n/a-----><--4 bytes---------> +// And we have three kind of frames. 
+// Data Frame: +// Type:8388609 +// Payload: Offset | Data +// <-8 bytes> + +// Continuous Frame +// Type:8388612 +// Payload: Offset (8-bytes) + +// End Frame +// Type:8388613 +// Payload: Offset | total scanned bytes | http status code | error message +// <-- 8bytes--><-----8 bytes--------><---4 bytes-------><---variabe---> + +// SelectObjectResponse defines HTTP response from OSS SelectObject +type SelectObjectResponse struct { + StatusCode int + Headers http.Header + Body io.ReadCloser + Frame SelectObjectResult + ReadTimeOut uint + ClientCRC32 uint32 + ServerCRC32 uint32 + WriterForCheckCrc32 hash.Hash32 + Finish bool +} + +func (sr *SelectObjectResponse) Read(p []byte) (n int, err error) { + n, err = sr.readFrames(p) + return +} + +// Close http reponse body +func (sr *SelectObjectResponse) Close() error { + return sr.Body.Close() +} + +// PostSelectResult is the request of SelectObject +type PostSelectResult struct { + Response *SelectObjectResponse +} + +// readFrames is read Frame +func (sr *SelectObjectResponse) readFrames(p []byte) (int, error) { + var nn int + var err error + var checkValid bool + if sr.Frame.OutputRawData == true { + nn, err = sr.Body.Read(p) + return nn, err + } + + if sr.Finish { + return 0, io.EOF + } + + for { + // if this Frame is Readed, then not reading Header + if sr.Frame.OpenLine != true { + err = sr.analysisHeader() + if err != nil { + return nn, err + } + } + + if sr.Frame.FrameType == DataFrameType { + n, err := sr.analysisData(p[nn:]) + if err != nil { + return nn, err + } + nn += n + + // if this Frame is readed all data, then empty the Frame to read it with next frame + if sr.Frame.ConsumedBytesLength == sr.Frame.PayloadLength-8 { + checkValid, err = sr.checkPayloadSum() + if err != nil || !checkValid { + return nn, fmt.Errorf("%s", err.Error()) + } + sr.emptyFrame() + } + + if nn == len(p) { + return nn, nil + } + } else if sr.Frame.FrameType == ContinuousFrameType { + checkValid, err = sr.checkPayloadSum() + if err != nil || !checkValid { + return nn, fmt.Errorf("%s", err.Error()) + } + } else if sr.Frame.FrameType == EndFrameType { + err = sr.analysisEndFrame() + if err != nil { + return nn, err + } + checkValid, err = sr.checkPayloadSum() + if checkValid { + sr.Finish = true + } + return nn, err + } else if sr.Frame.FrameType == MetaEndFrameCSVType { + err = sr.analysisMetaEndFrameCSV() + if err != nil { + return nn, err + } + checkValid, err = sr.checkPayloadSum() + if checkValid { + sr.Finish = true + } + return nn, err + } else if sr.Frame.FrameType == MetaEndFrameJSONType { + err = sr.analysisMetaEndFrameJSON() + if err != nil { + return nn, err + } + checkValid, err = sr.checkPayloadSum() + if checkValid { + sr.Finish = true + } + return nn, err + } + } + return nn, nil +} + +type chanReadIO struct { + readLen int + err error +} + +func (sr *SelectObjectResponse) readLen(p []byte, timeOut time.Duration) (int, error) { + r := sr.Body + ch := make(chan chanReadIO, 1) + defer close(ch) + go func(p []byte) { + var needReadLength int + readChan := chanReadIO{} + needReadLength = len(p) + for { + n, err := r.Read(p[readChan.readLen:needReadLength]) + readChan.readLen += n + if err != nil { + readChan.err = err + ch <- readChan + return + } + + if readChan.readLen == needReadLength { + break + } + } + ch <- readChan + }(p) + + select { + case <-time.After(time.Second * timeOut): + return 0, fmt.Errorf("requestId: %s, readLen timeout, timeout is %d(second),need read:%d", sr.Headers.Get(HTTPHeaderOssRequestID), timeOut, len(p)) + case result 
:= <-ch: + return result.readLen, result.err + } +} + +// analysisHeader is reading selectObject response body's header +func (sr *SelectObjectResponse) analysisHeader() error { + headFrameByte := make([]byte, 20) + _, err := sr.readLen(headFrameByte, time.Duration(sr.ReadTimeOut)) + if err != nil { + return fmt.Errorf("requestId: %s, Read response frame header failure,err:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error()) + } + + frameTypeByte := headFrameByte[0:4] + sr.Frame.Version = frameTypeByte[0] + frameTypeByte[0] = 0 + bytesToInt(frameTypeByte, &sr.Frame.FrameType) + + if sr.Frame.FrameType != DataFrameType && sr.Frame.FrameType != ContinuousFrameType && + sr.Frame.FrameType != EndFrameType && sr.Frame.FrameType != MetaEndFrameCSVType && sr.Frame.FrameType != MetaEndFrameJSONType { + return fmt.Errorf("requestId: %s, Unexpected frame type: %d", sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType) + } + + payloadLengthByte := headFrameByte[4:8] + bytesToInt(payloadLengthByte, &sr.Frame.PayloadLength) + headCheckSumByte := headFrameByte[8:12] + bytesToInt(headCheckSumByte, &sr.Frame.HeaderCheckSum) + byteOffset := headFrameByte[12:20] + bytesToInt(byteOffset, &sr.Frame.Offset) + sr.Frame.OpenLine = true + + err = sr.writerCheckCrc32(byteOffset) + return err +} + +// analysisData is reading the DataFrameType data of selectObject response body +func (sr *SelectObjectResponse) analysisData(p []byte) (int, error) { + var needReadLength int32 + lenP := int32(len(p)) + restByteLength := sr.Frame.PayloadLength - 8 - sr.Frame.ConsumedBytesLength + if lenP <= restByteLength { + needReadLength = lenP + } else { + needReadLength = restByteLength + } + n, err := sr.readLen(p[:needReadLength], time.Duration(sr.ReadTimeOut)) + if err != nil { + return n, fmt.Errorf("read frame data error,%s", err.Error()) + } + sr.Frame.ConsumedBytesLength += int32(n) + err = sr.writerCheckCrc32(p[:n]) + return n, err +} + +// analysisEndFrame is reading the EndFrameType data of selectObject response body +func (sr *SelectObjectResponse) analysisEndFrame() error { + var eF EndFrame + payLoadBytes := make([]byte, sr.Frame.PayloadLength-8) + _, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut)) + if err != nil { + return fmt.Errorf("read end frame error:%s", err.Error()) + } + bytesToInt(payLoadBytes[0:8], &eF.TotalScanned) + bytesToInt(payLoadBytes[8:12], &eF.HTTPStatusCode) + errMsgLength := sr.Frame.PayloadLength - 20 + eF.ErrorMsg = string(payLoadBytes[12 : errMsgLength+12]) + sr.Frame.EndFrame.TotalScanned = eF.TotalScanned + sr.Frame.EndFrame.HTTPStatusCode = eF.HTTPStatusCode + sr.Frame.EndFrame.ErrorMsg = eF.ErrorMsg + err = sr.writerCheckCrc32(payLoadBytes) + return err +} + +// analysisMetaEndFrameCSV is reading the MetaEndFrameCSVType data of selectObject response body +func (sr *SelectObjectResponse) analysisMetaEndFrameCSV() error { + var mCF MetaEndFrameCSV + payLoadBytes := make([]byte, sr.Frame.PayloadLength-8) + _, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut)) + if err != nil { + return fmt.Errorf("read meta end csv frame error:%s", err.Error()) + } + + bytesToInt(payLoadBytes[0:8], &mCF.TotalScanned) + bytesToInt(payLoadBytes[8:12], &mCF.Status) + bytesToInt(payLoadBytes[12:16], &mCF.SplitsCount) + bytesToInt(payLoadBytes[16:24], &mCF.RowsCount) + bytesToInt(payLoadBytes[24:28], &mCF.ColumnsCount) + errMsgLength := sr.Frame.PayloadLength - 36 + mCF.ErrorMsg = string(payLoadBytes[28 : errMsgLength+28]) + sr.Frame.MetaEndFrameCSV.ErrorMsg = mCF.ErrorMsg + 
sr.Frame.MetaEndFrameCSV.TotalScanned = mCF.TotalScanned + sr.Frame.MetaEndFrameCSV.Status = mCF.Status + sr.Frame.MetaEndFrameCSV.SplitsCount = mCF.SplitsCount + sr.Frame.MetaEndFrameCSV.RowsCount = mCF.RowsCount + sr.Frame.MetaEndFrameCSV.ColumnsCount = mCF.ColumnsCount + err = sr.writerCheckCrc32(payLoadBytes) + return err +} + +// analysisMetaEndFrameJSON is reading the MetaEndFrameJSONType data of selectObject response body +func (sr *SelectObjectResponse) analysisMetaEndFrameJSON() error { + var mJF MetaEndFrameJSON + payLoadBytes := make([]byte, sr.Frame.PayloadLength-8) + _, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut)) + if err != nil { + return fmt.Errorf("read meta end json frame error:%s", err.Error()) + } + + bytesToInt(payLoadBytes[0:8], &mJF.TotalScanned) + bytesToInt(payLoadBytes[8:12], &mJF.Status) + bytesToInt(payLoadBytes[12:16], &mJF.SplitsCount) + bytesToInt(payLoadBytes[16:24], &mJF.RowsCount) + errMsgLength := sr.Frame.PayloadLength - 32 + mJF.ErrorMsg = string(payLoadBytes[24 : errMsgLength+24]) + sr.Frame.MetaEndFrameJSON.ErrorMsg = mJF.ErrorMsg + sr.Frame.MetaEndFrameJSON.TotalScanned = mJF.TotalScanned + sr.Frame.MetaEndFrameJSON.Status = mJF.Status + sr.Frame.MetaEndFrameJSON.SplitsCount = mJF.SplitsCount + sr.Frame.MetaEndFrameJSON.RowsCount = mJF.RowsCount + + err = sr.writerCheckCrc32(payLoadBytes) + return err +} + +func (sr *SelectObjectResponse) checkPayloadSum() (bool, error) { + payLoadChecksumByte := make([]byte, 4) + n, err := sr.readLen(payLoadChecksumByte, time.Duration(sr.ReadTimeOut)) + if n == 4 { + bytesToInt(payLoadChecksumByte, &sr.Frame.PayloadChecksum) + sr.ServerCRC32 = sr.Frame.PayloadChecksum + sr.ClientCRC32 = sr.WriterForCheckCrc32.Sum32() + if sr.Frame.EnablePayloadCrc == true && sr.ServerCRC32 != 0 && sr.ServerCRC32 != sr.ClientCRC32 { + return false, fmt.Errorf("RequestId: %s, Unexpected frame type: %d, client %d but server %d", + sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType, sr.ClientCRC32, sr.ServerCRC32) + } + return true, err + } + return false, fmt.Errorf("RequestId:%s, read checksum error:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error()) +} + +func (sr *SelectObjectResponse) writerCheckCrc32(p []byte) (err error) { + err = nil + if sr.Frame.EnablePayloadCrc == true { + _, err = sr.WriterForCheckCrc32.Write(p) + } + return err +} + +// emptyFrame is emptying SelectObjectResponse Frame information +func (sr *SelectObjectResponse) emptyFrame() { + crcCalc := crc32.NewIEEE() + sr.WriterForCheckCrc32 = crcCalc + sr.Finish = false + + sr.Frame.ConsumedBytesLength = 0 + sr.Frame.OpenLine = false + sr.Frame.Version = byte(0) + sr.Frame.FrameType = 0 + sr.Frame.PayloadLength = 0 + sr.Frame.HeaderCheckSum = 0 + sr.Frame.Offset = 0 + sr.Frame.Data = "" + + sr.Frame.EndFrame.TotalScanned = 0 + sr.Frame.EndFrame.HTTPStatusCode = 0 + sr.Frame.EndFrame.ErrorMsg = "" + + sr.Frame.MetaEndFrameCSV.TotalScanned = 0 + sr.Frame.MetaEndFrameCSV.Status = 0 + sr.Frame.MetaEndFrameCSV.SplitsCount = 0 + sr.Frame.MetaEndFrameCSV.RowsCount = 0 + sr.Frame.MetaEndFrameCSV.ColumnsCount = 0 + sr.Frame.MetaEndFrameCSV.ErrorMsg = "" + + sr.Frame.MetaEndFrameJSON.TotalScanned = 0 + sr.Frame.MetaEndFrameJSON.Status = 0 + sr.Frame.MetaEndFrameJSON.SplitsCount = 0 + sr.Frame.MetaEndFrameJSON.RowsCount = 0 + sr.Frame.MetaEndFrameJSON.ErrorMsg = "" + + sr.Frame.PayloadChecksum = 0 +} + +// bytesToInt byte's array trans to int +func bytesToInt(b []byte, ret interface{}) { + binBuf := bytes.NewBuffer(b) + binary.Read(binBuf, 
binary.BigEndian, ret) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go new file mode 100644 index 0000000000..4fb8b1741b --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go @@ -0,0 +1,41 @@ +// +build !go1.7 + +package oss + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +func newTransport(conn *Conn, config *Config) *http.Transport { + httpTimeOut := conn.config.HTTPTimeout + httpMaxConns := conn.config.HTTPMaxConns + // New Transport + transport := &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + d := net.Dialer{ + Timeout: httpTimeOut.ConnectTimeout, + KeepAlive: 30 * time.Second, + } + if config.LocalAddr != nil { + d.LocalAddr = config.LocalAddr + } + conn, err := d.Dial(netw, addr) + if err != nil { + return nil, err + } + return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil + }, + MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost, + ResponseHeaderTimeout: httpTimeOut.HeaderTimeout, + } + + if config.InsecureSkipVerify { + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } + return transport +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go new file mode 100644 index 0000000000..2fae124e8e --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go @@ -0,0 +1,43 @@ +// +build go1.7 + +package oss + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +func newTransport(conn *Conn, config *Config) *http.Transport { + httpTimeOut := conn.config.HTTPTimeout + httpMaxConns := conn.config.HTTPMaxConns + // New Transport + transport := &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + d := net.Dialer{ + Timeout: httpTimeOut.ConnectTimeout, + KeepAlive: 30 * time.Second, + } + if config.LocalAddr != nil { + d.LocalAddr = config.LocalAddr + } + conn, err := d.Dial(netw, addr) + if err != nil { + return nil, err + } + return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil + }, + MaxIdleConns: httpMaxConns.MaxIdleConns, + MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost, + IdleConnTimeout: httpTimeOut.IdleConnTimeout, + ResponseHeaderTimeout: httpTimeOut.HeaderTimeout, + } + + if config.InsecureSkipVerify { + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } + return transport +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go new file mode 100644 index 0000000000..73d54c5f22 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go @@ -0,0 +1,1262 @@ +package oss + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "net/url" + "time" +) + +// ListBucketsResult defines the result object from ListBuckets request +type ListBucketsResult struct { + XMLName xml.Name `xml:"ListAllMyBucketsResult"` + Prefix string `xml:"Prefix"` // The prefix in this query + Marker string `xml:"Marker"` // The marker filter + MaxKeys int `xml:"MaxKeys"` // The max entry count to return. This information is returned when IsTruncated is true. + IsTruncated bool `xml:"IsTruncated"` // Flag true means there's remaining buckets to return. 
+ NextMarker string `xml:"NextMarker"` // The marker filter for the next list call + Owner Owner `xml:"Owner"` // The owner information + Buckets []BucketProperties `xml:"Buckets>Bucket"` // The bucket list +} + +// BucketProperties defines bucket properties +type BucketProperties struct { + XMLName xml.Name `xml:"Bucket"` + Name string `xml:"Name"` // Bucket name + Location string `xml:"Location"` // Bucket datacenter + CreationDate time.Time `xml:"CreationDate"` // Bucket create time + StorageClass string `xml:"StorageClass"` // Bucket storage class +} + +// GetBucketACLResult defines GetBucketACL request's result +type GetBucketACLResult struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + ACL string `xml:"AccessControlList>Grant"` // Bucket ACL + Owner Owner `xml:"Owner"` // Bucket owner +} + +// LifecycleConfiguration is the Bucket Lifecycle configuration +type LifecycleConfiguration struct { + XMLName xml.Name `xml:"LifecycleConfiguration"` + Rules []LifecycleRule `xml:"Rule"` +} + +// LifecycleRule defines Lifecycle rules +type LifecycleRule struct { + XMLName xml.Name `xml:"Rule"` + ID string `xml:"ID,omitempty"` // The rule ID + Prefix string `xml:"Prefix"` // The object key prefix + Status string `xml:"Status"` // The rule status (enabled or not) + Tags []Tag `xml:"Tag,omitempty"` // the tags property + Expiration *LifecycleExpiration `xml:"Expiration,omitempty"` // The expiration property + Transitions []LifecycleTransition `xml:"Transition,omitempty"` // The transition property + AbortMultipartUpload *LifecycleAbortMultipartUpload `xml:"AbortMultipartUpload,omitempty"` // The AbortMultipartUpload property + NonVersionExpiration *LifecycleVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"` + // Deprecated: Use NonVersionTransitions instead. 
+	NonVersionTransition  *LifecycleVersionTransition  `xml:"-"` // NonVersionTransition is not suggested to use
+	NonVersionTransitions []LifecycleVersionTransition `xml:"NoncurrentVersionTransition,omitempty"`
+}
+
+// LifecycleExpiration defines the rule's expiration property
+type LifecycleExpiration struct {
+	XMLName                   xml.Name `xml:"Expiration"`
+	Days                      int      `xml:"Days,omitempty"`                      // Relative expiration time: the expiration time in days after the last modified time
+	Date                      string   `xml:"Date,omitempty"`                      // Absolute expiration time: the expiration date; not recommended
+	CreatedBeforeDate         string   `xml:"CreatedBeforeDate,omitempty"`         // Objects created before this date will expire
+	ExpiredObjectDeleteMarker *bool    `xml:"ExpiredObjectDeleteMarker,omitempty"` // Specifies whether an expired delete marker is automatically removed
+}
+
+// LifecycleTransition defines the rule's transition property
+type LifecycleTransition struct {
+	XMLName           xml.Name         `xml:"Transition"`
+	Days              int              `xml:"Days,omitempty"`              // Relative transition time: the transition time in days after the last modified time
+	CreatedBeforeDate string           `xml:"CreatedBeforeDate,omitempty"` // Objects created before this date will expire
+	StorageClass      StorageClassType `xml:"StorageClass,omitempty"`      // Specifies the target storage class
+}
+
+// LifecycleAbortMultipartUpload defines the rule's abort multipart upload property
+type LifecycleAbortMultipartUpload struct {
+	XMLName           xml.Name `xml:"AbortMultipartUpload"`
+	Days              int      `xml:"Days,omitempty"`              // Relative expiration time: the expiration time in days after the last modified time
+	CreatedBeforeDate string   `xml:"CreatedBeforeDate,omitempty"` // Objects created before this date will expire
+}
+
+// LifecycleVersionExpiration defines the rule's NoncurrentVersionExpiration property
+type LifecycleVersionExpiration struct {
+	XMLName        xml.Name `xml:"NoncurrentVersionExpiration"`
+	NoncurrentDays int      `xml:"NoncurrentDays,omitempty"` // How many days after the object becomes a non-current version
+}
+
+// LifecycleVersionTransition defines the rule's NoncurrentVersionTransition property
+type LifecycleVersionTransition struct {
+	XMLName        xml.Name         `xml:"NoncurrentVersionTransition"`
+	NoncurrentDays int              `xml:"NoncurrentDays,omitempty"` // How many days after the object becomes a non-current version
+	StorageClass   StorageClassType `xml:"StorageClass,omitempty"`
+}
+
+const iso8601DateFormat = "2006-01-02T15:04:05.000Z"
+
+// BuildLifecycleRuleByDays builds a lifecycle rule that expires objects the given number of days after their last modified time
+func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
+	var statusStr = "Enabled"
+	if !status {
+		statusStr = "Disabled"
+	}
+	return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
+		Expiration: &LifecycleExpiration{Days: days}}
+}
+
+// BuildLifecycleRuleByDate builds a lifecycle rule that expires objects on the specified date
+func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule {
+	var statusStr = "Enabled"
+	if !status {
+		statusStr = "Disabled"
+	}
+	date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC).Format(iso8601DateFormat)
+	return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
+		Expiration: &LifecycleExpiration{Date: date}}
+}
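To make the two builders concrete, a short usage sketch; the bucket name and rule IDs are placeholders, and it assumes the SetBucketLifecycle method that client.go in this patch provides, with a client set up as in the earlier sketch:

// Expire objects under logs/ 30 days after their last modification,
// and expire objects under tmp/ on a fixed date.
rules := []oss.LifecycleRule{
	oss.BuildLifecycleRuleByDays("expire-logs", "logs/", true, 30),
	oss.BuildLifecycleRuleByDate("drop-tmp", "tmp/", true, 2023, 1, 1),
}
if err := client.SetBucketLifecycle("example-bucket", rules); err != nil {
	log.Fatal(err)
}

+// verifyLifecycleRules checks that each lifecycle rule is valid; it returns an error describing the first invalid rule.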
+func verifyLifecycleRules(rules []LifecycleRule) error { + if len(rules) == 0 { + return fmt.Errorf("invalid rules, the length of rules is zero") + } + for k, rule := range rules { + if rule.Status != "Enabled" && rule.Status != "Disabled" { + return fmt.Errorf("invalid rule, the value of status must be Enabled or Disabled") + } + + abortMPU := rule.AbortMultipartUpload + if abortMPU != nil { + if (abortMPU.Days != 0 && abortMPU.CreatedBeforeDate != "") || (abortMPU.Days == 0 && abortMPU.CreatedBeforeDate == "") { + return fmt.Errorf("invalid abort multipart upload lifecycle, must be set one of CreatedBeforeDate and Days") + } + } + + transitions := rule.Transitions + if len(transitions) > 0 { + for _, transition := range transitions { + if (transition.Days != 0 && transition.CreatedBeforeDate != "") || (transition.Days == 0 && transition.CreatedBeforeDate == "") { + return fmt.Errorf("invalid transition lifecycle, must be set one of CreatedBeforeDate and Days") + } + } + } + + // NonVersionTransition is not suggested to use + // to keep compatible + if rule.NonVersionTransition != nil && len(rule.NonVersionTransitions) > 0 { + return fmt.Errorf("NonVersionTransition and NonVersionTransitions cannot both have values") + } else if rule.NonVersionTransition != nil { + rules[k].NonVersionTransitions = append(rules[k].NonVersionTransitions, *rule.NonVersionTransition) + } + } + + return nil +} + +// GetBucketLifecycleResult defines GetBucketLifecycle's result object +type GetBucketLifecycleResult LifecycleConfiguration + +// RefererXML defines Referer configuration +type RefererXML struct { + XMLName xml.Name `xml:"RefererConfiguration"` + AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // Allow empty referrer + RefererList []string `xml:"RefererList>Referer"` // Referer whitelist +} + +// GetBucketRefererResult defines result object for GetBucketReferer request +type GetBucketRefererResult RefererXML + +// LoggingXML defines logging configuration +type LoggingXML struct { + XMLName xml.Name `xml:"BucketLoggingStatus"` + LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // The logging configuration information +} + +type loggingXMLEmpty struct { + XMLName xml.Name `xml:"BucketLoggingStatus"` +} + +// LoggingEnabled defines the logging configuration information +type LoggingEnabled struct { + XMLName xml.Name `xml:"LoggingEnabled"` + TargetBucket string `xml:"TargetBucket"` // The bucket name for storing the log files + TargetPrefix string `xml:"TargetPrefix"` // The log file prefix +} + +// GetBucketLoggingResult defines the result from GetBucketLogging request +type GetBucketLoggingResult LoggingXML + +// WebsiteXML defines Website configuration +type WebsiteXML struct { + XMLName xml.Name `xml:"WebsiteConfiguration"` + IndexDocument IndexDocument `xml:"IndexDocument,omitempty"` // The index page + ErrorDocument ErrorDocument `xml:"ErrorDocument,omitempty"` // The error page + RoutingRules []RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` // The routing Rule list +} + +// IndexDocument defines the index page info +type IndexDocument struct { + XMLName xml.Name `xml:"IndexDocument"` + Suffix string `xml:"Suffix"` // The file name for the index page +} + +// ErrorDocument defines the 404 error page info +type ErrorDocument struct { + XMLName xml.Name `xml:"ErrorDocument"` + Key string `xml:"Key"` // 404 error file name +} + +// RoutingRule defines the routing rules +type RoutingRule struct { + XMLName xml.Name `xml:"RoutingRule"` + RuleNumber int `xml:"RuleNumber,omitempty"` // The 
routing number
+	Condition  Condition `xml:"Condition,omitempty"` // The routing condition
+	Redirect   Redirect  `xml:"Redirect,omitempty"`  // The routing redirect
+
+}
+
+// Condition defines the condition in the RoutingRule
+type Condition struct {
+	XMLName                     xml.Name        `xml:"Condition"`
+	KeyPrefixEquals             string          `xml:"KeyPrefixEquals,omitempty"`             // Matching object key prefix
+	HTTPErrorCodeReturnedEquals int             `xml:"HttpErrorCodeReturnedEquals,omitempty"` // The HTTP error code the rule matches when the specified object is accessed
+	IncludeHeader               []IncludeHeader `xml:"IncludeHeader"`                         // The rule matches requests that include these headers
+}
+
+// IncludeHeader defines includeHeader in the RoutingRule's Condition
+type IncludeHeader struct {
+	XMLName xml.Name `xml:"IncludeHeader"`
+	Key     string   `xml:"Key,omitempty"`    // The include header key
+	Equals  string   `xml:"Equals,omitempty"` // The include header value
+}
+
+// Redirect defines redirect in the RoutingRule
+type Redirect struct {
+	XMLName               xml.Name      `xml:"Redirect"`
+	RedirectType          string        `xml:"RedirectType,omitempty"`         // The redirect type; valid values are Mirror, External, Internal and AliCDN
+	PassQueryString       *bool         `xml:"PassQueryString"`                // Whether to forward the request's query string, true or false
+	MirrorURL             string        `xml:"MirrorURL,omitempty"`            // The origin address the mirror fetches from
+	MirrorPassQueryString *bool         `xml:"MirrorPassQueryString"`          // Whether the mirror forwards the request's query string, true or false
+	MirrorFollowRedirect  *bool         `xml:"MirrorFollowRedirect"`           // Whether to follow the redirect if the mirror origin returns a 3xx
+	MirrorCheckMd5        *bool         `xml:"MirrorCheckMd5"`                 // Whether to check the MD5 of the mirrored body
+	MirrorHeaders         MirrorHeaders `xml:"MirrorHeaders,omitempty"`        // Mirror headers
+	Protocol              string        `xml:"Protocol,omitempty"`             // The redirect protocol
+	HostName              string        `xml:"HostName,omitempty"`             // The redirect host name
+	ReplaceKeyPrefixWith  string        `xml:"ReplaceKeyPrefixWith,omitempty"` // The value that replaces the matched object key prefix
+	HttpRedirectCode      int           `xml:"HttpRedirectCode,omitempty"`     // The HTTP redirect code
+	ReplaceKeyWith        string        `xml:"ReplaceKeyWith,omitempty"`       // The value that replaces the object key
+}
+
+// MirrorHeaders defines MirrorHeaders in the Redirect
+type MirrorHeaders struct {
+	XMLName xml.Name          `xml:"MirrorHeaders"`
+	PassAll *bool             `xml:"PassAll"` // Pass all request headers through to the origin
+	Pass    []string          `xml:"Pass"`    // Pass only these headers through to the origin
+	Remove  []string          `xml:"Remove"`  // Never pass these headers through to the origin
+	Set     []MirrorHeaderSet `xml:"Set"`     // Headers to set on the request sent to the origin
+}
+
+// MirrorHeaderSet defines Set for Redirect's MirrorHeaders
+type MirrorHeaderSet struct {
+	XMLName xml.Name `xml:"Set"`
+	Key     string   `xml:"Key,omitempty"`   // The mirror header key
+	Value   string   `xml:"Value,omitempty"` // The mirror header value
+}
+
+// GetBucketWebsiteResult defines the result from GetBucketWebsite request.
+type GetBucketWebsiteResult WebsiteXML
+
+// CORSXML defines CORS configuration
+type CORSXML struct {
+	XMLName   xml.Name   `xml:"CORSConfiguration"`
+	CORSRules []CORSRule `xml:"CORSRule"` // CORS rules
+}
+
+// CORSRule defines CORS rules
+type CORSRule struct {
+	XMLName       xml.Name `xml:"CORSRule"`
+	AllowedOrigin []string `xml:"AllowedOrigin"` // Allowed origins.
By default it's wildcard '*' + AllowedMethod []string `xml:"AllowedMethod"` // Allowed methods + AllowedHeader []string `xml:"AllowedHeader"` // Allowed headers + ExposeHeader []string `xml:"ExposeHeader"` // Allowed response headers + MaxAgeSeconds int `xml:"MaxAgeSeconds"` // Max cache ages in seconds +} + +// GetBucketCORSResult defines the result from GetBucketCORS request. +type GetBucketCORSResult CORSXML + +// GetBucketInfoResult defines the result from GetBucketInfo request. +type GetBucketInfoResult struct { + XMLName xml.Name `xml:"BucketInfo"` + BucketInfo BucketInfo `xml:"Bucket"` +} + +// BucketInfo defines Bucket information +type BucketInfo struct { + XMLName xml.Name `xml:"Bucket"` + Name string `xml:"Name"` // Bucket name + Location string `xml:"Location"` // Bucket datacenter + CreationDate time.Time `xml:"CreationDate"` // Bucket creation time + ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket external endpoint + IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket internal endpoint + ACL string `xml:"AccessControlList>Grant"` // Bucket ACL + RedundancyType string `xml:"DataRedundancyType"` // Bucket DataRedundancyType + Owner Owner `xml:"Owner"` // Bucket owner + StorageClass string `xml:"StorageClass"` // Bucket storage class + SseRule SSERule `xml:"ServerSideEncryptionRule"` // Bucket ServerSideEncryptionRule + Versioning string `xml:"Versioning"` // Bucket Versioning + TransferAcceleration string `xml:"TransferAcceleration"` // bucket TransferAcceleration + CrossRegionReplication string `xml:"CrossRegionReplication"` // bucket CrossRegionReplication +} + +type SSERule struct { + XMLName xml.Name `xml:"ServerSideEncryptionRule"` // Bucket ServerSideEncryptionRule + KMSMasterKeyID string `xml:"KMSMasterKeyID,omitempty"` // Bucket KMSMasterKeyID + SSEAlgorithm string `xml:"SSEAlgorithm,omitempty"` // Bucket SSEAlgorithm + KMSDataEncryption string `xml:"KMSDataEncryption,omitempty"` //Bucket KMSDataEncryption +} + +// ListObjectsResult defines the result from ListObjects request +type ListObjectsResult struct { + XMLName xml.Name `xml:"ListBucketResult"` + Prefix string `xml:"Prefix"` // The object prefix + Marker string `xml:"Marker"` // The marker filter. 
+ MaxKeys int `xml:"MaxKeys"` // Max keys to return + Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name + IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false) + NextMarker string `xml:"NextMarker"` // The start point of the next query + Objects []ObjectProperties `xml:"Contents"` // Object list + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter +} + +// ObjectProperties defines Objecct properties +type ObjectProperties struct { + XMLName xml.Name `xml:"Contents"` + Key string `xml:"Key"` // Object key + Type string `xml:"Type"` // Object type + Size int64 `xml:"Size"` // Object size + ETag string `xml:"ETag"` // Object ETag + Owner Owner `xml:"Owner"` // Object owner information + LastModified time.Time `xml:"LastModified"` // Object last modified time + StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive) +} + +// ListObjectsResultV2 defines the result from ListObjectsV2 request +type ListObjectsResultV2 struct { + XMLName xml.Name `xml:"ListBucketResult"` + Prefix string `xml:"Prefix"` // The object prefix + StartAfter string `xml:"StartAfter"` // the input StartAfter + ContinuationToken string `xml:"ContinuationToken"` // the input ContinuationToken + MaxKeys int `xml:"MaxKeys"` // Max keys to return + Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name + IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false) + NextContinuationToken string `xml:"NextContinuationToken"` // The start point of the next NextContinuationToken + Objects []ObjectProperties `xml:"Contents"` // Object list + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter +} + +// ListObjectVersionsResult defines the result from ListObjectVersions request +type ListObjectVersionsResult struct { + XMLName xml.Name `xml:"ListVersionsResult"` + Name string `xml:"Name"` // The Bucket Name + Owner Owner `xml:"Owner"` // The owner of bucket + Prefix string `xml:"Prefix"` // The object prefix + KeyMarker string `xml:"KeyMarker"` // The start marker filter. + VersionIdMarker string `xml:"VersionIdMarker"` // The start VersionIdMarker filter. 
+ MaxKeys int `xml:"MaxKeys"` // Max keys to return + Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name + IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false) + NextKeyMarker string `xml:"NextKeyMarker"` // The start point of the next query + NextVersionIdMarker string `xml:"NextVersionIdMarker"` // The start point of the next query + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter + ObjectDeleteMarkers []ObjectDeleteMarkerProperties `xml:"DeleteMarker"` // DeleteMarker list + ObjectVersions []ObjectVersionProperties `xml:"Version"` // version list +} + +type ObjectDeleteMarkerProperties struct { + XMLName xml.Name `xml:"DeleteMarker"` + Key string `xml:"Key"` // The Object Key + VersionId string `xml:"VersionId"` // The Object VersionId + IsLatest bool `xml:"IsLatest"` // is current version or not + LastModified time.Time `xml:"LastModified"` // Object last modified time + Owner Owner `xml:"Owner"` // bucket owner element +} + +type ObjectVersionProperties struct { + XMLName xml.Name `xml:"Version"` + Key string `xml:"Key"` // The Object Key + VersionId string `xml:"VersionId"` // The Object VersionId + IsLatest bool `xml:"IsLatest"` // is latest version or not + LastModified time.Time `xml:"LastModified"` // Object last modified time + Type string `xml:"Type"` // Object type + Size int64 `xml:"Size"` // Object size + ETag string `xml:"ETag"` // Object ETag + StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive) + Owner Owner `xml:"Owner"` // bucket owner element +} + +// Owner defines Bucket/Object's owner +type Owner struct { + XMLName xml.Name `xml:"Owner"` + ID string `xml:"ID"` // Owner ID + DisplayName string `xml:"DisplayName"` // Owner's display name +} + +// CopyObjectResult defines result object of CopyObject +type CopyObjectResult struct { + XMLName xml.Name `xml:"CopyObjectResult"` + LastModified time.Time `xml:"LastModified"` // New object's last modified time. + ETag string `xml:"ETag"` // New object's ETag +} + +// GetObjectACLResult defines result of GetObjectACL request +type GetObjectACLResult GetBucketACLResult + +type deleteXML struct { + XMLName xml.Name `xml:"Delete"` + Objects []DeleteObject `xml:"Object"` // Objects to delete + Quiet bool `xml:"Quiet"` // Flag of quiet mode. 
+}
+
+// DeleteObject defines the struct for deleting an object
+type DeleteObject struct {
+	XMLName   xml.Name `xml:"Object"`
+	Key       string   `xml:"Key"`                 // Object name
+	VersionId string   `xml:"VersionId,omitempty"` // Object VersionId
+}
+
+// DeleteObjectsResult defines the result of a DeleteObjects request
+type DeleteObjectsResult struct {
+	XMLName        xml.Name
+	DeletedObjects []string // Deleted object key list
+}
+
+// DeleteObjectVersionsResult defines the result of a DeleteObjects request against a versioned bucket
+type DeleteObjectVersionsResult struct {
+	XMLName              xml.Name         `xml:"DeleteResult"`
+	DeletedObjectsDetail []DeletedKeyInfo `xml:"Deleted"` // Detailed information about the deleted objects
+}
+
+// DeletedKeyInfo defines the details of a deleted object
+type DeletedKeyInfo struct {
+	XMLName               xml.Name `xml:"Deleted"`
+	Key                   string   `xml:"Key"`                   // Object key
+	VersionId             string   `xml:"VersionId"`             // VersionId
+	DeleteMarker          bool     `xml:"DeleteMarker"`          // Object DeleteMarker
+	DeleteMarkerVersionId string   `xml:"DeleteMarkerVersionId"` // Object DeleteMarkerVersionId
+}
+
+// InitiateMultipartUploadResult defines the result of an InitiateMultipartUpload request
+type InitiateMultipartUploadResult struct {
+	XMLName  xml.Name `xml:"InitiateMultipartUploadResult"`
+	Bucket   string   `xml:"Bucket"`   // Bucket name
+	Key      string   `xml:"Key"`      // Object name to upload
+	UploadID string   `xml:"UploadId"` // Generated UploadId
+}
+
+// UploadPart defines the upload/copy part
+type UploadPart struct {
+	XMLName    xml.Name `xml:"Part"`
+	PartNumber int      `xml:"PartNumber"` // Part number
+	ETag       string   `xml:"ETag"`       // ETag value of the part's data
+}
+
+type UploadParts []UploadPart
+
+func (slice UploadParts) Len() int {
+	return len(slice)
+}
+
+func (slice UploadParts) Less(i, j int) bool {
+	return slice[i].PartNumber < slice[j].PartNumber
+}
+
+func (slice UploadParts) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+// UploadPartCopyResult defines the result object of a multipart copy request
+type UploadPartCopyResult struct {
+	XMLName      xml.Name  `xml:"CopyPartResult"`
+	LastModified time.Time `xml:"LastModified"` // Last modified time
+	ETag         string    `xml:"ETag"`         // ETag
+}
+
+type completeMultipartUploadXML struct {
+	XMLName xml.Name     `xml:"CompleteMultipartUpload"`
+	Part    []UploadPart `xml:"Part"`
+}
+
+// CompleteMultipartUploadResult defines the result object of CompleteMultipartUpload
+type CompleteMultipartUploadResult struct {
+	XMLName  xml.Name `xml:"CompleteMultipartUploadResult"`
+	Location string   `xml:"Location"` // Object URL
+	Bucket   string   `xml:"Bucket"`   // Bucket name
+	ETag     string   `xml:"ETag"`     // Object ETag
+	Key      string   `xml:"Key"`      // Object name
+}
+
+// ListUploadedPartsResult defines the result object of ListUploadedParts
+type ListUploadedPartsResult struct {
+	XMLName              xml.Name `xml:"ListPartsResult"`
+	Bucket               string   `xml:"Bucket"`               // Bucket name
+	Key                  string   `xml:"Key"`                  // Object name
+	UploadID             string   `xml:"UploadId"`             // Upload ID
+	NextPartNumberMarker string   `xml:"NextPartNumberMarker"` // Next part number
+	MaxParts             int      `xml:"MaxParts"`             // Max parts count
+	IsTruncated          bool     `xml:"IsTruncated"`          // Whether the results were truncated; false means all entries have been returned
+ UploadedParts []UploadedPart `xml:"Part"` // Uploaded parts +} + +// UploadedPart defines uploaded part +type UploadedPart struct { + XMLName xml.Name `xml:"Part"` + PartNumber int `xml:"PartNumber"` // Part number + LastModified time.Time `xml:"LastModified"` // Last modified time + ETag string `xml:"ETag"` // ETag cache + Size int `xml:"Size"` // Part size +} + +// ListMultipartUploadResult defines result object of ListMultipartUpload +type ListMultipartUploadResult struct { + XMLName xml.Name `xml:"ListMultipartUploadsResult"` + Bucket string `xml:"Bucket"` // Bucket name + Delimiter string `xml:"Delimiter"` // Delimiter for grouping object. + Prefix string `xml:"Prefix"` // Object prefix + KeyMarker string `xml:"KeyMarker"` // Object key marker + UploadIDMarker string `xml:"UploadIdMarker"` // UploadId marker + NextKeyMarker string `xml:"NextKeyMarker"` // Next key marker, if not all entries returned. + NextUploadIDMarker string `xml:"NextUploadIdMarker"` // Next uploadId marker, if not all entries returned. + MaxUploads int `xml:"MaxUploads"` // Max uploads to return + IsTruncated bool `xml:"IsTruncated"` // Flag indicates all entries are returned. + Uploads []UncompletedUpload `xml:"Upload"` // Ongoing uploads (not completed, not aborted) + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // Common prefixes list. +} + +// UncompletedUpload structure wraps an uncompleted upload task +type UncompletedUpload struct { + XMLName xml.Name `xml:"Upload"` + Key string `xml:"Key"` // Object name + UploadID string `xml:"UploadId"` // The UploadId + Initiated time.Time `xml:"Initiated"` // Initialization time in the format such as 2012-02-23T04:18:23.000Z +} + +// ProcessObjectResult defines result object of ProcessObject +type ProcessObjectResult struct { + Bucket string `json:"bucket"` + FileSize int `json:"fileSize"` + Object string `json:"object"` + Status string `json:"status"` +} + +// decodeDeleteObjectsResult decodes deleting objects result in URL encoding +func decodeDeleteObjectsResult(result *DeleteObjectVersionsResult) error { + var err error + for i := 0; i < len(result.DeletedObjectsDetail); i++ { + result.DeletedObjectsDetail[i].Key, err = url.QueryUnescape(result.DeletedObjectsDetail[i].Key) + if err != nil { + return err + } + } + return nil +} + +// decodeListObjectsResult decodes list objects result in URL encoding +func decodeListObjectsResult(result *ListObjectsResult) error { + var err error + result.Prefix, err = url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + result.Marker, err = url.QueryUnescape(result.Marker) + if err != nil { + return err + } + result.Delimiter, err = url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + result.NextMarker, err = url.QueryUnescape(result.NextMarker) + if err != nil { + return err + } + for i := 0; i < len(result.Objects); i++ { + result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key) + if err != nil { + return err + } + } + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + return nil +} + +// decodeListObjectsResult decodes list objects result in URL encoding +func decodeListObjectsResultV2(result *ListObjectsResultV2) error { + var err error + result.Prefix, err = url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + result.StartAfter, err = url.QueryUnescape(result.StartAfter) + if err != nil { + return err + } + result.Delimiter, err = 
url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + result.NextContinuationToken, err = url.QueryUnescape(result.NextContinuationToken) + if err != nil { + return err + } + for i := 0; i < len(result.Objects); i++ { + result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key) + if err != nil { + return err + } + } + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + return nil +} + +// decodeListObjectVersionsResult decodes list version objects result in URL encoding +func decodeListObjectVersionsResult(result *ListObjectVersionsResult) error { + var err error + + // decode:Delimiter + result.Delimiter, err = url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + + // decode Prefix + result.Prefix, err = url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + + // decode KeyMarker + result.KeyMarker, err = url.QueryUnescape(result.KeyMarker) + if err != nil { + return err + } + + // decode VersionIdMarker + result.VersionIdMarker, err = url.QueryUnescape(result.VersionIdMarker) + if err != nil { + return err + } + + // decode NextKeyMarker + result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker) + if err != nil { + return err + } + + // decode NextVersionIdMarker + result.NextVersionIdMarker, err = url.QueryUnescape(result.NextVersionIdMarker) + if err != nil { + return err + } + + // decode CommonPrefixes + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + + // decode deleteMarker + for i := 0; i < len(result.ObjectDeleteMarkers); i++ { + result.ObjectDeleteMarkers[i].Key, err = url.QueryUnescape(result.ObjectDeleteMarkers[i].Key) + if err != nil { + return err + } + } + + // decode ObjectVersions + for i := 0; i < len(result.ObjectVersions); i++ { + result.ObjectVersions[i].Key, err = url.QueryUnescape(result.ObjectVersions[i].Key) + if err != nil { + return err + } + } + + return nil +} + +// decodeListUploadedPartsResult decodes +func decodeListUploadedPartsResult(result *ListUploadedPartsResult) error { + var err error + result.Key, err = url.QueryUnescape(result.Key) + if err != nil { + return err + } + return nil +} + +// decodeListMultipartUploadResult decodes list multipart upload result in URL encoding +func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error { + var err error + result.Prefix, err = url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + result.Delimiter, err = url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + result.KeyMarker, err = url.QueryUnescape(result.KeyMarker) + if err != nil { + return err + } + result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker) + if err != nil { + return err + } + for i := 0; i < len(result.Uploads); i++ { + result.Uploads[i].Key, err = url.QueryUnescape(result.Uploads[i].Key) + if err != nil { + return err + } + } + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + return nil +} + +// createBucketConfiguration defines the configuration for creating a bucket. 
+type createBucketConfiguration struct {
+	XMLName            xml.Name           `xml:"CreateBucketConfiguration"`
+	StorageClass       StorageClassType   `xml:"StorageClass,omitempty"`
+	DataRedundancyType DataRedundancyType `xml:"DataRedundancyType,omitempty"`
+	ObjectHashFunction ObjecthashFuncType `xml:"ObjectHashFunction,omitempty"`
+}
+
+// LiveChannelConfiguration defines the configuration for a live channel
+type LiveChannelConfiguration struct {
+	XMLName     xml.Name          `xml:"LiveChannelConfiguration"`
+	Description string            `xml:"Description,omitempty"` // Description of the live channel, up to 128 bytes
+	Status      string            `xml:"Status,omitempty"`      // The status of the live channel: enabled or disabled
+	Target      LiveChannelTarget `xml:"Target"`                // The target configuration of the live channel
+	// use a pointer so an empty snapshot can be omitted
+	Snapshot *LiveChannelSnapshot `xml:"Snapshot,omitempty"` // The snapshot configuration of the live channel
+}
+
+// LiveChannelTarget is the target configuration of a live channel
+type LiveChannelTarget struct {
+	XMLName      xml.Name `xml:"Target"`
+	Type         string   `xml:"Type"`                   // The type of the target; only HLS is supported
+	FragDuration int      `xml:"FragDuration,omitempty"` // The length of each ts object in seconds, in the range [1,100]
+	FragCount    int      `xml:"FragCount,omitempty"`    // The number of ts objects in the m3u8 object, in the range [1,100]
+	PlaylistName string   `xml:"PlaylistName,omitempty"` // The name of the m3u8 object, which must end with ".m3u8"; its length is in the range [6,128]
+}
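To make the target fields concrete, a hedged sketch of a configuration a caller might pass to the live-channel APIs that livechannel.go in this patch provides (channel name and values are illustrative, with client and bucket set up as in the first sketch):

// An HLS channel producing 5-second fragments and keeping
// 3 of them in the playlist object.
config := oss.LiveChannelConfiguration{
	Status: "enabled",
	Target: oss.LiveChannelTarget{
		Type:         "HLS",
		FragDuration: 5,
		FragCount:    3,
		PlaylistName: "playlist.m3u8",
	},
}
result, err := bucket.CreateLiveChannel("example-channel", config)
if err != nil {
	log.Fatal(err)
}
log.Println("publish URLs:", result.PublishUrls)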
+// LiveChannelSnapshot is the snapshot configuration of a live channel
+type LiveChannelSnapshot struct {
+	XMLName     xml.Name `xml:"Snapshot"`
+	RoleName    string   `xml:"RoleName,omitempty"`    // The role used for snapshot operations; it should have write permission on DestBucket and permission to send messages to NotifyTopic
+	DestBucket  string   `xml:"DestBucket,omitempty"`  // The bucket the snapshots are written to; it must have the same owner as the source bucket
+	NotifyTopic string   `xml:"NotifyTopic,omitempty"` // The MNS topic used to notify users of the results of high-frequency snapshot operations
+	Interval    int      `xml:"Interval,omitempty"`    // The snapshot interval; no snapshot is taken if there is no I-frame during the interval
+}
+
+// CreateLiveChannelResult is the result of CreateLiveChannel
+type CreateLiveChannelResult struct {
+	XMLName     xml.Name `xml:"CreateLiveChannelResult"`
+	PublishUrls []string `xml:"PublishUrls>Url"` // The list of push URLs
+	PlayUrls    []string `xml:"PlayUrls>Url"`    // The list of play URLs
+}
+
+// LiveChannelStat is the result of GetLiveChannelStat
+type LiveChannelStat struct {
+	XMLName       xml.Name         `xml:"LiveChannelStat"`
+	Status        string           `xml:"Status"`        // The current push status of the live channel: Disabled, Live or Idle
+	ConnectedTime time.Time        `xml:"ConnectedTime"` // The time when the client started pushing, in ISO8601 format
+	RemoteAddr    string           `xml:"RemoteAddr"`    // The IP address of the client
+	Video         LiveChannelVideo `xml:"Video"`         // Video stream information
+	Audio         LiveChannelAudio `xml:"Audio"`         // Audio stream information
+}
+
+// LiveChannelVideo is the video stream information
+type LiveChannelVideo struct {
+	XMLName   xml.Name `xml:"Video"`
+	Width     int      `xml:"Width"`     // Width in pixels
+	Height    int      `xml:"Height"`    // Height in pixels
+	FrameRate int      `xml:"FrameRate"` // Frame rate
+	Bandwidth int      `xml:"Bandwidth"` // Bandwidth in B/s
+}
+
+// LiveChannelAudio is the audio stream information
+type LiveChannelAudio struct {
+	XMLName    xml.Name `xml:"Audio"`
+	SampleRate int      `xml:"SampleRate"` // Sample rate
+	Bandwidth  int      `xml:"Bandwidth"`  // Bandwidth in B/s
+	Codec      string   `xml:"Codec"`      // Encoding format
+}
+
+// LiveChannelHistory is the result of GetLiveChannelHistory; at most the latest 10 push records are returned
+type LiveChannelHistory struct {
+	XMLName xml.Name     `xml:"LiveChannelHistory"`
+	Record  []LiveRecord `xml:"LiveRecord"` // The list of push records
+}
+
+// LiveRecord is a push record
+type LiveRecord struct {
+	XMLName    xml.Name  `xml:"LiveRecord"`
+	StartTime  time.Time `xml:"StartTime"`  // Start time in ISO8601 format
+	EndTime    time.Time `xml:"EndTime"`    // End time in ISO8601 format
+	RemoteAddr string    `xml:"RemoteAddr"` // The IP address of the remote client
+}
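A short sketch of reading these status types back, assuming the GetLiveChannelStat helper from livechannel.go in this patch and the bucket from the earlier sketches (the channel name is a placeholder):

stat, err := bucket.GetLiveChannelStat("example-channel")
if err != nil {
	log.Fatal(err)
}
// Status is one of Disabled, Live or Idle; the video stats are only
// meaningful while the channel is Live.
if stat.Status == "Live" {
	log.Printf("video: %dx%d @ %d B/s", stat.Video.Width, stat.Video.Height, stat.Video.Bandwidth)
}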
+// ListLiveChannelResult is the result of ListLiveChannel
+type ListLiveChannelResult struct {
+	XMLName     xml.Name          `xml:"ListLiveChannelResult"`
+	Prefix      string            `xml:"Prefix"`      // Filters channels whose names start with the value of Prefix
+	Marker      string            `xml:"Marker"`      // The cursor from which the listing starts
+	MaxKeys     int               `xml:"MaxKeys"`     // The maximum count returned; the default value is 100 and it cannot be greater than 1000
+	IsTruncated bool              `xml:"IsTruncated"` // Indicates whether the results were truncated; "true" means partial results were returned while "false" means all results have been returned
+	NextMarker  string            `xml:"NextMarker"`  // The Marker value to use for the next request
+	LiveChannel []LiveChannelInfo `xml:"LiveChannel"` // The information of the live channels
+}
+
+// LiveChannelInfo is the information of a live channel
+type LiveChannelInfo struct {
+	XMLName      xml.Name  `xml:"LiveChannel"`
+	Name         string    `xml:"Name"`            // The name of the live channel
+	Description  string    `xml:"Description"`     // Description of the live channel
+	Status       string    `xml:"Status"`          // Status: disabled or enabled
+	LastModified time.Time `xml:"LastModified"`    // Last modification time in ISO8601 format
+	PublishUrls  []string  `xml:"PublishUrls>Url"` // The list of push URLs
+	PlayUrls     []string  `xml:"PlayUrls>Url"`    // The list of play URLs
+}
+
+// Tag is a tag for the object
+type Tag struct {
+	XMLName xml.Name `xml:"Tag"`
+	Key     string   `xml:"Key"`
+	Value   string   `xml:"Value"`
+}
+
+// Tagging is the tag set for the object
+type Tagging struct {
+	XMLName xml.Name `xml:"Tagging"`
+	Tags    []Tag    `xml:"TagSet>Tag,omitempty"`
+}
+
+// GetObjectTaggingResult is the return value of GetObjectTagging
+type GetObjectTaggingResult Tagging
+
+// VersioningConfig is the versioning configuration for the bucket
+type VersioningConfig struct {
+	XMLName xml.Name `xml:"VersioningConfiguration"`
+	Status  string   `xml:"Status"`
+}
+
+type GetBucketVersioningResult VersioningConfig
+
+// ServerEncryptionRule is the server-side encryption rule for the bucket
+type ServerEncryptionRule struct {
+	XMLName    xml.Name       `xml:"ServerSideEncryptionRule"`
+	SSEDefault SSEDefaultRule `xml:"ApplyServerSideEncryptionByDefault"`
+}
+
+// SSEDefaultRule is the default server-side encryption rule for the bucket
+type SSEDefaultRule struct {
+	XMLName           xml.Name `xml:"ApplyServerSideEncryptionByDefault"`
+	SSEAlgorithm      string   `xml:"SSEAlgorithm,omitempty"`
+	KMSMasterKeyID    string   `xml:"KMSMasterKeyID,omitempty"`
+	KMSDataEncryption string   `xml:"KMSDataEncryption,omitempty"`
+}
+
+type GetBucketEncryptionResult ServerEncryptionRule
+type GetBucketTaggingResult Tagging
+
+type BucketStat struct {
+	XMLName              xml.Name `xml:"BucketStat"`
+	Storage              int64    `xml:"Storage"`
+	ObjectCount          int64    `xml:"ObjectCount"`
+	MultipartUploadCount int64    `xml:"MultipartUploadCount"`
+}
+type GetBucketStatResult BucketStat
+
+// RequestPaymentConfiguration defines the request payment configuration
+type RequestPaymentConfiguration struct {
+	XMLName xml.Name `xml:"RequestPaymentConfiguration"`
+	Payer   string   `xml:"Payer,omitempty"`
+}
+
+// BucketQoSConfiguration defines the QoS configuration
+type BucketQoSConfiguration struct {
+	XMLName                   xml.Name `xml:"QoSConfiguration"`
+	TotalUploadBandwidth      *int     `xml:"TotalUploadBandwidth"`      // Total upload bandwidth
+	IntranetUploadBandwidth   *int     `xml:"IntranetUploadBandwidth"`   // Intranet upload bandwidth
+	ExtranetUploadBandwidth   *int     `xml:"ExtranetUploadBandwidth"`   // Extranet upload bandwidth
+	TotalDownloadBandwidth    *int     `xml:"TotalDownloadBandwidth"`    // Total download bandwidth
+	IntranetDownloadBandwidth *int     `xml:"IntranetDownloadBandwidth"` // Intranet download bandwidth
+	ExtranetDownloadBandwidth *int     `xml:"ExtranetDownloadBandwidth"` // Extranet download bandwidth
+	TotalQPS                  *int     `xml:"TotalQps"`                  // Total QPS
+	IntranetQPS               *int     `xml:"IntranetQps"`               // Intranet QPS
+	ExtranetQPS               *int     `xml:"ExtranetQps"`               // Extranet QPS
+}
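The QoS fields above are pointers because encoding/xml writes nothing for a nil pointer, so "no limit configured" can be told apart from an explicit limit of 0. A small construction sketch (the values are illustrative):

// Taking addresses lets nil mean "unset" while &zero means
// an explicit limit of 0.
upload := 100
qps := 1000
qos := oss.BucketQoSConfiguration{
	TotalUploadBandwidth: &upload,
	TotalQPS:             &qps,
}
_ = qos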
`xml:"Region,omitempty"` // Effective area of Qos configuration + BucketQoSConfiguration +} + +////////////////////////////////////////////////////////////// +/////////////////// Select OBject //////////////////////////// +////////////////////////////////////////////////////////////// + +type CsvMetaRequest struct { + XMLName xml.Name `xml:"CsvMetaRequest"` + InputSerialization InputSerialization `xml:"InputSerialization"` + OverwriteIfExists *bool `xml:"OverwriteIfExists,omitempty"` +} + +// encodeBase64 encode base64 of the CreateSelectObjectMeta api request params +func (meta *CsvMetaRequest) encodeBase64() { + meta.InputSerialization.CSV.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(meta.InputSerialization.CSV.RecordDelimiter)) + meta.InputSerialization.CSV.FieldDelimiter = + base64.StdEncoding.EncodeToString([]byte(meta.InputSerialization.CSV.FieldDelimiter)) + meta.InputSerialization.CSV.QuoteCharacter = + base64.StdEncoding.EncodeToString([]byte(meta.InputSerialization.CSV.QuoteCharacter)) +} + +type JsonMetaRequest struct { + XMLName xml.Name `xml:"JsonMetaRequest"` + InputSerialization InputSerialization `xml:"InputSerialization"` + OverwriteIfExists *bool `xml:"OverwriteIfExists,omitempty"` +} + +type InputSerialization struct { + XMLName xml.Name `xml:"InputSerialization"` + CSV CSV `xml:CSV,omitempty` + JSON JSON `xml:JSON,omitempty` + CompressionType string `xml:"CompressionType,omitempty"` +} +type CSV struct { + XMLName xml.Name `xml:"CSV"` + RecordDelimiter string `xml:"RecordDelimiter,omitempty"` + FieldDelimiter string `xml:"FieldDelimiter,omitempty"` + QuoteCharacter string `xml:"QuoteCharacter,omitempty"` +} + +type JSON struct { + XMLName xml.Name `xml:"JSON"` + JSONType string `xml:"Type,omitempty"` +} + +// SelectRequest is for the SelectObject request params of json file +type SelectRequest struct { + XMLName xml.Name `xml:"SelectRequest"` + Expression string `xml:"Expression"` + InputSerializationSelect InputSerializationSelect `xml:"InputSerialization"` + OutputSerializationSelect OutputSerializationSelect `xml:"OutputSerialization"` + SelectOptions SelectOptions `xml:"Options,omitempty"` +} +type InputSerializationSelect struct { + XMLName xml.Name `xml:"InputSerialization"` + CsvBodyInput CSVSelectInput `xml:CSV,omitempty` + JsonBodyInput JSONSelectInput `xml:JSON,omitempty` + CompressionType string `xml:"CompressionType,omitempty"` +} +type CSVSelectInput struct { + XMLName xml.Name `xml:"CSV"` + FileHeaderInfo string `xml:"FileHeaderInfo,omitempty"` + RecordDelimiter string `xml:"RecordDelimiter,omitempty"` + FieldDelimiter string `xml:"FieldDelimiter,omitempty"` + QuoteCharacter string `xml:"QuoteCharacter,omitempty"` + CommentCharacter string `xml:"CommentCharacter,omitempty"` + Range string `xml:"Range,omitempty"` + SplitRange string +} +type JSONSelectInput struct { + XMLName xml.Name `xml:"JSON"` + JSONType string `xml:"Type,omitempty"` + Range string `xml:"Range,omitempty"` + ParseJSONNumberAsString *bool `xml:"ParseJsonNumberAsString"` + SplitRange string +} + +func (jsonInput *JSONSelectInput) JsonIsEmpty() bool { + if jsonInput.JSONType != "" { + return false + } + return true +} + +type OutputSerializationSelect struct { + XMLName xml.Name `xml:"OutputSerialization"` + CsvBodyOutput CSVSelectOutput `xml:CSV,omitempty` + JsonBodyOutput JSONSelectOutput `xml:JSON,omitempty` + OutputRawData *bool `xml:"OutputRawData,omitempty"` + KeepAllColumns *bool `xml:"KeepAllColumns,omitempty"` + EnablePayloadCrc *bool 
`xml:"EnablePayloadCrc,omitempty"` + OutputHeader *bool `xml:"OutputHeader,omitempty"` +} +type CSVSelectOutput struct { + XMLName xml.Name `xml:"CSV"` + RecordDelimiter string `xml:"RecordDelimiter,omitempty"` + FieldDelimiter string `xml:"FieldDelimiter,omitempty"` +} +type JSONSelectOutput struct { + XMLName xml.Name `xml:"JSON"` + RecordDelimiter string `xml:"RecordDelimiter,omitempty"` +} + +func (selectReq *SelectRequest) encodeBase64() { + if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() { + selectReq.csvEncodeBase64() + } else { + selectReq.jsonEncodeBase64() + } +} + +// csvEncodeBase64 encode base64 of the SelectObject api request params +func (selectReq *SelectRequest) csvEncodeBase64() { + selectReq.Expression = base64.StdEncoding.EncodeToString([]byte(selectReq.Expression)) + selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter)) + selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter = + base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter)) + selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter = + base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter)) + selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter = + base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter)) + selectReq.OutputSerializationSelect.CsvBodyOutput.FieldDelimiter = + base64.StdEncoding.EncodeToString([]byte(selectReq.OutputSerializationSelect.CsvBodyOutput.FieldDelimiter)) + selectReq.OutputSerializationSelect.CsvBodyOutput.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(selectReq.OutputSerializationSelect.CsvBodyOutput.RecordDelimiter)) + + // handle Range + if selectReq.InputSerializationSelect.CsvBodyInput.Range != "" { + selectReq.InputSerializationSelect.CsvBodyInput.Range = "line-range=" + selectReq.InputSerializationSelect.CsvBodyInput.Range + } + + if selectReq.InputSerializationSelect.CsvBodyInput.SplitRange != "" { + selectReq.InputSerializationSelect.CsvBodyInput.Range = "split-range=" + selectReq.InputSerializationSelect.CsvBodyInput.SplitRange + } +} + +// jsonEncodeBase64 encode base64 of the SelectObject api request params +func (selectReq *SelectRequest) jsonEncodeBase64() { + selectReq.Expression = base64.StdEncoding.EncodeToString([]byte(selectReq.Expression)) + selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter)) + + // handle Range + if selectReq.InputSerializationSelect.JsonBodyInput.Range != "" { + selectReq.InputSerializationSelect.JsonBodyInput.Range = "line-range=" + selectReq.InputSerializationSelect.JsonBodyInput.Range + } + + if selectReq.InputSerializationSelect.JsonBodyInput.SplitRange != "" { + selectReq.InputSerializationSelect.JsonBodyInput.Range = "split-range=" + selectReq.InputSerializationSelect.JsonBodyInput.SplitRange + } +} + +// CsvOptions is a element in the SelectObject api request's params +type SelectOptions struct { + XMLName xml.Name `xml:"Options"` + SkipPartialDataRecord *bool `xml:"SkipPartialDataRecord,omitempty"` + MaxSkippedRecordsAllowed string `xml:"MaxSkippedRecordsAllowed,omitempty"` +} + +// SelectObjectResult is the SelectObject api's return +type SelectObjectResult struct { + Version byte + 
+// SelectObjectResult is the result of the SelectObject API
+type SelectObjectResult struct {
+	Version          byte
+	FrameType        int32
+	PayloadLength    int32
+	HeaderCheckSum   uint32
+	Offset           uint64
+	Data             string           // DataFrame
+	EndFrame         EndFrame         // EndFrame
+	MetaEndFrameCSV  MetaEndFrameCSV  // MetaEndFrameCSV
+	MetaEndFrameJSON MetaEndFrameJSON // MetaEndFrameJSON
+	PayloadChecksum  uint32
+	ReadFlagInfo
+}
+
+// ReadFlagInfo records the reading status while frame data is being read
+type ReadFlagInfo struct {
+	OpenLine            bool
+	ConsumedBytesLength int32
+	EnablePayloadCrc    bool
+	OutputRawData       bool
+}
+
+// EndFrame is the payload of an EndFrameType frame of the SelectObject API
+type EndFrame struct {
+	TotalScanned   int64
+	HTTPStatusCode int32
+	ErrorMsg       string
+}
+
+// MetaEndFrameCSV is the payload of a MetaEndFrameCSVType frame of CreateSelectObjectMeta
+type MetaEndFrameCSV struct {
+	TotalScanned int64
+	Status       int32
+	SplitsCount  int32
+	RowsCount    int64
+	ColumnsCount int32
+	ErrorMsg     string
+}
+
+// MetaEndFrameJSON is the payload of a MetaEndFrameJSONType frame of CreateSelectObjectMeta
+type MetaEndFrameJSON struct {
+	TotalScanned int64
+	Status       int32
+	SplitsCount  int32
+	RowsCount    int64
+	ErrorMsg     string
+}
+
+// InventoryConfiguration is the inventory configuration
+type InventoryConfiguration struct {
+	XMLName                xml.Name             `xml:"InventoryConfiguration"`
+	Id                     string               `xml:"Id,omitempty"`
+	IsEnabled              *bool                `xml:"IsEnabled,omitempty"`
+	Prefix                 string               `xml:"Filter>Prefix,omitempty"`
+	OSSBucketDestination   OSSBucketDestination `xml:"Destination>OSSBucketDestination,omitempty"`
+	Frequency              string               `xml:"Schedule>Frequency,omitempty"`
+	IncludedObjectVersions string               `xml:"IncludedObjectVersions,omitempty"`
+	OptionalFields         OptionalFields       `xml:"OptionalFields,omitempty"`
+}
+
+type OptionalFields struct {
+	XMLName xml.Name `xml:"OptionalFields,omitempty"`
+	Field   []string `xml:"Field,omitempty"`
+}
+
+type OSSBucketDestination struct {
+	XMLName    xml.Name       `xml:"OSSBucketDestination"`
+	Format     string         `xml:"Format,omitempty"`
+	AccountId  string         `xml:"AccountId,omitempty"`
+	RoleArn    string         `xml:"RoleArn,omitempty"`
+	Bucket     string         `xml:"Bucket,omitempty"`
+	Prefix     string         `xml:"Prefix,omitempty"`
+	Encryption *InvEncryption `xml:"Encryption,omitempty"`
+}
+
+type InvEncryption struct {
+	XMLName xml.Name   `xml:"Encryption"`
+	SseOss  *InvSseOss `xml:"SSE-OSS"`
+	SseKms  *InvSseKms `xml:"SSE-KMS"`
+}
+
+type InvSseOss struct {
+	XMLName xml.Name `xml:"SSE-OSS"`
+}
+
+type InvSseKms struct {
+	XMLName xml.Name `xml:"SSE-KMS"`
+	KmsId   string   `xml:"KeyId,omitempty"`
+}
+
+type ListInventoryConfigurationsResult struct {
+	XMLName                xml.Name                 `xml:"ListInventoryConfigurationsResult"`
+	InventoryConfiguration []InventoryConfiguration `xml:"InventoryConfiguration,omitempty"`
+	IsTruncated            *bool                    `xml:"IsTruncated,omitempty"`
+	NextContinuationToken  string                   `xml:"NextContinuationToken,omitempty"`
+}
+
+// RestoreConfiguration for RestoreObject
+type RestoreConfiguration struct {
+	XMLName xml.Name `xml:"RestoreRequest"`
+	Days    int32    `xml:"Days,omitempty"`
+	Tier    string   `xml:"JobParameters>Tier,omitempty"`
+}
+
+// AsyncFetchTaskConfiguration for SetBucketAsyncFetchTask
+type AsyncFetchTaskConfiguration struct {
+	XMLName       xml.Name `xml:"AsyncFetchTaskConfiguration"`
+	Url           string   `xml:"Url,omitempty"`
+	Object        string   `xml:"Object,omitempty"`
+	Host          string   `xml:"Host,omitempty"`
+	ContentMD5    string   `xml:"ContentMD5,omitempty"`
+	Callback      string   `xml:"Callback,omitempty"`
+	StorageClass  string   `xml:"StorageClass,omitempty"`
+	IgnoreSameKey bool     `xml:"IgnoreSameKey"`
+}
+
+// AsyncFetchTaskResult for the SetBucketAsyncFetchTask result
+type AsyncFetchTaskResult struct {
+	XMLName xml.Name `xml:"AsyncFetchTaskResult"`
+	TaskId  string
`xml:"TaskId,omitempty"` +} + +// AsynFetchTaskInfo for GetBucketAsyncFetchTask result +type AsynFetchTaskInfo struct { + XMLName xml.Name `xml:"AsyncFetchTaskInfo"` + TaskId string `xml:"TaskId,omitempty"` + State string `xml:"State,omitempty"` + ErrorMsg string `xml:"ErrorMsg,omitempty"` + TaskInfo AsyncTaskInfo `xml:"TaskInfo,omitempty"` +} + +// AsyncTaskInfo for async task information +type AsyncTaskInfo struct { + XMLName xml.Name `xml:"TaskInfo"` + Url string `xml:"Url,omitempty"` + Object string `xml:"Object,omitempty"` + Host string `xml:"Host,omitempty"` + ContentMD5 string `xml:"ContentMD5,omitempty"` + Callback string `xml:"Callback,omitempty"` + StorageClass string `xml:"StorageClass,omitempty"` + IgnoreSameKey bool `xml:"IgnoreSameKey"` +} + +// InitiateWormConfiguration define InitiateBucketWorm configuration +type InitiateWormConfiguration struct { + XMLName xml.Name `xml:"InitiateWormConfiguration"` + RetentionPeriodInDays int `xml:"RetentionPeriodInDays"` // specify retention days +} + +// ExtendWormConfiguration define ExtendWormConfiguration configuration +type ExtendWormConfiguration struct { + XMLName xml.Name `xml:"ExtendWormConfiguration"` + RetentionPeriodInDays int `xml:"RetentionPeriodInDays"` // specify retention days +} + +// WormConfiguration define WormConfiguration +type WormConfiguration struct { + XMLName xml.Name `xml:"WormConfiguration"` + WormId string `xml:"WormId,omitempty"` + State string `xml:"State,omitempty"` + RetentionPeriodInDays int `xml:"RetentionPeriodInDays"` // specify retention days + CreationDate string `xml:"CreationDate,omitempty"` +} + +// TransferAccConfiguration define transfer acceleration configuration +type TransferAccConfiguration struct { + XMLName xml.Name `xml:"TransferAccelerationConfiguration"` + Enabled bool `xml:"Enabled"` +} + +// ReplicationXML defines simple replication xml, and ReplicationXML is used for "DeleteBucketReplication" in client.go +type ReplicationXML struct { + XMLName xml.Name `xml:"ReplicationRules"` + ID string `xml:"ID,omitempty"` +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go new file mode 100644 index 0000000000..8b3ea09d28 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go @@ -0,0 +1,552 @@ +package oss + +import ( + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "time" +) + +// UploadFile is multipart file upload. +// +// objectKey the object name. +// filePath the local file path to upload. +// partSize the part size in byte. +// options the options for uploading object. +// +// error it's nil if the operation succeeds, otherwise it's an error object. 
+//
+func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
+    if partSize < MinPartSize || partSize > MaxPartSize {
+        return errors.New("oss: part size invalid range [100KB, 5GB]")
+    }
+
+    cpConf := getCpConfig(options)
+    routines := getRoutines(options)
+
+    if cpConf != nil && cpConf.IsEnable {
+        cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey)
+        if cpFilePath != "" {
+            return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines)
+        }
+    }
+
+    return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
+}
+
+func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string {
+    if cpConf.FilePath == "" && cpConf.DirPath != "" {
+        dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
+        absPath, _ := filepath.Abs(srcFile)
+        cpFileName := getCpFileName(absPath, dest, "")
+        cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
+    }
+    return cpConf.FilePath
+}
+
+// ----- concurrent upload without checkpoint -----
+
+// getCpConfig gets the checkpoint configuration
+func getCpConfig(options []Option) *cpConfig {
+    cpcOpt, err := FindOption(options, checkpointConfig, nil)
+    if err != nil || cpcOpt == nil {
+        return nil
+    }
+
+    return cpcOpt.(*cpConfig)
+}
+
+// getCpFileName returns the name of the checkpoint file
+func getCpFileName(src, dest, versionId string) string {
+    md5Ctx := md5.New()
+    md5Ctx.Write([]byte(src))
+    srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
+
+    md5Ctx.Reset()
+    md5Ctx.Write([]byte(dest))
+    destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
+
+    if versionId == "" {
+        return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
+    }
+
+    md5Ctx.Reset()
+    md5Ctx.Write([]byte(versionId))
+    versionCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
+    return fmt.Sprintf("%v-%v-%v.cp", srcCheckSum, destCheckSum, versionCheckSum)
+}
+
+// getRoutines gets the routine count. By default it's 1.
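+// Values outside [1, 100] are clamped to that range.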
+func getRoutines(options []Option) int {
+    rtnOpt, err := FindOption(options, routineNum, nil)
+    if err != nil || rtnOpt == nil {
+        return 1
+    }
+
+    rs := rtnOpt.(int)
+    if rs < 1 {
+        rs = 1
+    } else if rs > 100 {
+        rs = 100
+    }
+
+    return rs
+}
+
+// getPayer returns the payer of the request
+func getPayer(options []Option) string {
+    payerOpt, err := FindOption(options, HTTPHeaderOssRequester, nil)
+    if err != nil || payerOpt == nil {
+        return ""
+    }
+    return payerOpt.(string)
+}
+
+// GetProgressListener gets the progress callback
+func GetProgressListener(options []Option) ProgressListener {
+    isSet, listener, _ := IsOptionSet(options, progressListener)
+    if !isSet {
+        return nil
+    }
+    return listener.(ProgressListener)
+}
+
+// uploadPartHook is for testing usage
+type uploadPartHook func(id int, chunk FileChunk) error
+
+var uploadPartHooker uploadPartHook = defaultUploadPart
+
+func defaultUploadPart(id int, chunk FileChunk) error {
+    return nil
+}
+
+// workerArg defines the worker argument structure
+type workerArg struct {
+    bucket *Bucket
+    filePath string
+    imur InitiateMultipartUploadResult
+    options []Option
+    hook uploadPartHook
+}
+
+// defaultUploadProgressListener is the default no-op ProgressListener
+type defaultUploadProgressListener struct {
+}
+
+// ProgressChanged no-ops
+func (listener *defaultUploadProgressListener) ProgressChanged(event *ProgressEvent) {
+}
+
+// worker is the worker coroutine function
+func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
+    for chunk := range jobs {
+        if err := arg.hook(id, chunk); err != nil {
+            failed <- err
+            break
+        }
+        var respHeader http.Header
+        p := Progress(&defaultUploadProgressListener{})
+        // Reserve capacity only; a non-zero length here would prepend nil Options.
+        opts := make([]Option, 0, len(arg.options)+2)
+        opts = append(opts, arg.options...)
+
+        // use defaultUploadProgressListener
+        opts = append(opts, p, GetResponseHeader(&respHeader))
+
+        startT := time.Now().UnixNano() / 1000 / 1000 / 1000
+        part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, opts...)
+        endT := time.Now().UnixNano() / 1000 / 1000 / 1000
+        if err != nil {
+            arg.bucket.Client.Config.WriteLog(Debug, "upload part error,cost:%d second,part number:%d,request id:%s,error:%s\n", endT-startT, chunk.Number, GetRequestId(respHeader), err.Error())
+            failed <- err
+            break
+        }
+        select {
+        case <-die:
+            return
+        default:
+        }
+        results <- part
+    }
+}
+
+// scheduler feeds all chunks into the jobs channel and closes it
+func scheduler(jobs chan FileChunk, chunks []FileChunk) {
+    for _, chunk := range chunks {
+        jobs <- chunk
+    }
+    close(jobs)
+}
+
+func getTotalBytes(chunks []FileChunk) int64 {
+    var tb int64
+    for _, chunk := range chunks {
+        tb += chunk.Size
+    }
+    return tb
+}
+
+// uploadFile is a concurrent upload without checkpoint
+func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
+    listener := GetProgressListener(options)
+
+    chunks, err := SplitFileByPartSize(filePath, partSize)
+    if err != nil {
+        return err
+    }
+
+    partOptions := ChoiceTransferPartOption(options)
+    completeOptions := ChoiceCompletePartOption(options)
+    abortOptions := ChoiceAbortPartOption(options)
+
+    // Initialize the multipart upload
+    imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
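+    // From here on this is a fan-out/fan-in pattern: a scheduler goroutine
+    // feeds chunks into the jobs channel, `routines` workers upload parts
+    // concurrently, and the receive loop below collects the results,
+    // aborting the multipart upload on the first failure.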
+    if err != nil {
+        return err
+    }
+
+    jobs := make(chan FileChunk, len(chunks))
+    results := make(chan UploadPart, len(chunks))
+    failed := make(chan error)
+    die := make(chan bool)
+
+    var completedBytes int64
+    totalBytes := getTotalBytes(chunks)
+    event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
+    publishProgress(listener, event)
+
+    // Start the worker coroutines
+    arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
+    for w := 1; w <= routines; w++ {
+        go worker(w, arg, jobs, results, failed, die)
+    }
+
+    // Schedule the jobs
+    go scheduler(jobs, chunks)
+
+    // Wait for the upload to finish
+    completed := 0
+    parts := make([]UploadPart, len(chunks))
+    for completed < len(chunks) {
+        select {
+        case part := <-results:
+            completed++
+            parts[part.PartNumber-1] = part
+            completedBytes += chunks[part.PartNumber-1].Size
+
+            // Per-read progress has already been published from teeReader.Read(),
+            // so this event only reports the completed part.
+            event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, chunks[part.PartNumber-1].Size)
+            publishProgress(listener, event)
+        case err := <-failed:
+            close(die)
+            event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
+            publishProgress(listener, event)
+            bucket.AbortMultipartUpload(imur, abortOptions...)
+            return err
+        }
+
+        if completed >= len(chunks) {
+            break
+        }
+    }
+
+    event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
+    publishProgress(listener, event)
+
+    // Complete the multipart upload
+    _, err = bucket.CompleteMultipartUpload(imur, parts, completeOptions...)
+    if err != nil {
+        bucket.AbortMultipartUpload(imur, abortOptions...)
+        return err
+    }
+    return nil
+}
+
+// ----- concurrent upload with checkpoint -----
+const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
+
+type uploadCheckpoint struct {
+    Magic string // Magic
+    MD5 string // Checkpoint file content's MD5
+    FilePath string // Local file path
+    FileStat cpStat // File state
+    ObjectKey string // Key
+    UploadID string // Upload ID
+    Parts []cpPart // All parts of the local file
+}
+
+type cpStat struct {
+    Size int64 // File size
+    LastModified time.Time // File's last modified time
+    MD5 string // Local file's MD5
+}
+
+type cpPart struct {
+    Chunk FileChunk // File chunk
+    Part UploadPart // Uploaded part
+    IsCompleted bool // Upload complete flag
+}
+
+// isValid checks whether the checkpoint data is valid; it is valid when the
+// checkpoint file is intact and the local file has not been modified.
+func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
+    // Compare the CP's magic number and MD5.
+    cpb := cp
+    cpb.MD5 = ""
+    js, _ := json.Marshal(cpb)
+    sum := md5.Sum(js)
+    b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+    if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
+        return false, nil
+    }
+
+    // Check whether the local file has been modified.
+    fd, err := os.Open(filePath)
+    if err != nil {
+        return false, err
+    }
+    defer fd.Close()
+
+    st, err := fd.Stat()
+    if err != nil {
+        return false, err
+    }
+
+    md, err := calcFileMD5(filePath)
+    if err != nil {
+        return false, err
+    }
+
+    // Compare the file size, the file's last modified time and the file's MD5
+    if cp.FileStat.Size != st.Size() ||
+        !cp.FileStat.LastModified.Equal(st.ModTime()) ||
+        cp.FileStat.MD5 != md {
+        return false, nil
+    }
+
+    return true, nil
+}
+
+// load loads the checkpoint from the local file
+func (cp *uploadCheckpoint) load(filePath string) error {
+    contents, err := ioutil.ReadFile(filePath)
+    if err != nil {
+        return err
+    }
+
+    err = json.Unmarshal(contents, cp)
+    return err
+}
+
+// dump dumps the checkpoint to the local file
+func (cp *uploadCheckpoint) dump(filePath string) error {
+    bcp := *cp
+
+    // Calculate MD5
+    bcp.MD5 = ""
+    js, err := json.Marshal(bcp)
+    if err != nil {
+        return err
+    }
+    sum := md5.Sum(js)
+    b64 := base64.StdEncoding.EncodeToString(sum[:])
+    bcp.MD5 = b64
+
+    // Serialization
+    js, err = json.Marshal(bcp)
+    if err != nil {
+        return err
+    }
+
+    // Dump
+    return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// updatePart updates the part status
+func (cp *uploadCheckpoint) updatePart(part UploadPart) {
+    cp.Parts[part.PartNumber-1].Part = part
+    cp.Parts[part.PartNumber-1].IsCompleted = true
+}
+
+// todoParts returns unfinished parts
+func (cp *uploadCheckpoint) todoParts() []FileChunk {
+    fcs := []FileChunk{}
+    for _, part := range cp.Parts {
+        if !part.IsCompleted {
+            fcs = append(fcs, part.Chunk)
+        }
+    }
+    return fcs
+}
+
+// allParts returns all parts
+func (cp *uploadCheckpoint) allParts() []UploadPart {
+    ps := []UploadPart{}
+    for _, part := range cp.Parts {
+        ps = append(ps, part.Part)
+    }
+    return ps
+}
+
+// getCompletedBytes returns the completed bytes count
+func (cp *uploadCheckpoint) getCompletedBytes() int64 {
+    var completedBytes int64
+    for _, part := range cp.Parts {
+        if part.IsCompleted {
+            completedBytes += part.Chunk.Size
+        }
+    }
+    return completedBytes
+}
+
+// calcFileMD5 calculates the MD5 for the specified local file.
+// It is currently a stub that always returns an empty string, which
+// effectively disables the MD5 comparison in isValid.
+func calcFileMD5(filePath string) (string, error) {
+    return "", nil
+}
+
+// prepare initializes the multipart upload
+func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
+    // CP
+    cp.Magic = uploadCpMagic
+    cp.FilePath = filePath
+    cp.ObjectKey = objectKey
+
+    // Local file
+    fd, err := os.Open(filePath)
+    if err != nil {
+        return err
+    }
+    defer fd.Close()
+
+    st, err := fd.Stat()
+    if err != nil {
+        return err
+    }
+    cp.FileStat.Size = st.Size()
+    cp.FileStat.LastModified = st.ModTime()
+    md, err := calcFileMD5(filePath)
+    if err != nil {
+        return err
+    }
+    cp.FileStat.MD5 = md
+
+    // Chunks
+    parts, err := SplitFileByPartSize(filePath, partSize)
+    if err != nil {
+        return err
+    }
+
+    cp.Parts = make([]cpPart, len(parts))
+    for i, part := range parts {
+        cp.Parts[i].Chunk = part
+        cp.Parts[i].IsCompleted = false
+    }
+
+    // Initiate the multipart upload
+    imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
+    if err != nil {
+        return err
+    }
+    cp.UploadID = imur.UploadID
+
+    return nil
+}
+
+// complete completes the multipart upload and deletes the local CP file
+func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
+    imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
+        Key: cp.ObjectKey, UploadID: cp.UploadID}
+    _, err := bucket.CompleteMultipartUpload(imur, parts, options...)
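+    // If completion failed, keep the checkpoint file so a later retry can
+    // resume from the recorded parts; it is removed only on success.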
+    if err != nil {
+        return err
+    }
+    os.Remove(cpFilePath)
+    return err
+}
+
+// uploadFileWithCp handles a concurrent upload with checkpoint
+func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
+    listener := GetProgressListener(options)
+
+    partOptions := ChoiceTransferPartOption(options)
+    completeOptions := ChoiceCompletePartOption(options)
+
+    // Load CP data
+    ucp := uploadCheckpoint{}
+    err := ucp.load(cpFilePath)
+    if err != nil {
+        os.Remove(cpFilePath)
+    }
+
+    // If loading failed or the CP data is invalid, reinitialize the checkpoint.
+    valid, err := ucp.isValid(filePath)
+    if err != nil || !valid {
+        if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
+            return err
+        }
+        os.Remove(cpFilePath)
+    }
+
+    chunks := ucp.todoParts()
+    imur := InitiateMultipartUploadResult{
+        Bucket: bucket.BucketName,
+        Key: objectKey,
+        UploadID: ucp.UploadID}
+
+    jobs := make(chan FileChunk, len(chunks))
+    results := make(chan UploadPart, len(chunks))
+    failed := make(chan error)
+    die := make(chan bool)
+
+    completedBytes := ucp.getCompletedBytes()
+
+    // RwBytes in this ProgressEvent is 0 because per-read progress has
+    // already been published from teeReader.Read().
+    event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size, 0)
+    publishProgress(listener, event)
+
+    // Start the workers
+    arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
+    for w := 1; w <= routines; w++ {
+        go worker(w, arg, jobs, results, failed, die)
+    }
+
+    // Schedule the jobs
+    go scheduler(jobs, chunks)
+
+    // Wait for the jobs to finish
+    completed := 0
+    for completed < len(chunks) {
+        select {
+        case part := <-results:
+            completed++
+            ucp.updatePart(part)
+            ucp.dump(cpFilePath)
+            completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
+            event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size, ucp.Parts[part.PartNumber-1].Chunk.Size)
+            publishProgress(listener, event)
+        case err := <-failed:
+            close(die)
+            event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size, 0)
+            publishProgress(listener, event)
+            return err
+        }
+
+        if completed >= len(chunks) {
+            break
+        }
+    }
+
+    event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size, 0)
+    publishProgress(listener, event)
+
+    // Complete the multipart upload
+    err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, completeOptions)
+    return err
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
new file mode 100644
index 0000000000..78b7913006
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
@@ -0,0 +1,522 @@
+package oss
+
+import (
+    "bytes"
+    "errors"
+    "fmt"
+    "hash/crc32"
+    "hash/crc64"
+    "io"
+    "net/http"
+    "os"
+    "os/exec"
+    "runtime"
+    "strconv"
+    "strings"
+    "time"
+)
+
+var sys_name string
+var sys_release string
+var sys_machine string
+
+func init() {
+    sys_name = runtime.GOOS
+    sys_release = "-"
+    sys_machine = runtime.GOARCH
+
+    if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil {
+        sys_name = string(bytes.TrimSpace(out))
+    }
+    if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil {
+        sys_release = string(bytes.TrimSpace(out))
+    }
+    if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil {
+        sys_machine = string(bytes.TrimSpace(out))
+    }
+}
+
+// userAgent gets the user agent string.
+// It carries the SDK version, OS information and Go version.
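+// A typical value looks like aliyun-sdk-go/<version> (Linux/<release>/x86_64;go1.x).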
+func userAgent() string {
+    sys := getSysInfo()
+    return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
+        sys.release, sys.machine, runtime.Version())
+}
+
+type sysInfo struct {
+    name string // OS name such as Linux/Windows
+    release string // OS version, e.g. 2.6.32-220.23.2.ali1089.el5.x86_64
+    machine string // CPU type, e.g. amd64/x86_64
+}
+
+// getSysInfo gets the OS information and CPU type
+func getSysInfo() sysInfo {
+    return sysInfo{name: sys_name, release: sys_release, machine: sys_machine}
+}
+
+// GetRangeConfig gets the download range from the options.
+func GetRangeConfig(options []Option) (*UnpackedRange, error) {
+    rangeOpt, err := FindOption(options, HTTPHeaderRange, nil)
+    if err != nil || rangeOpt == nil {
+        return nil, err
+    }
+    return ParseRange(rangeOpt.(string))
+}
+
+// UnpackedRange is an HTTP Range header value unpacked into its parts
+type UnpackedRange struct {
+    HasStart bool // Flag indicates if the start point is specified
+    HasEnd bool // Flag indicates if the end point is specified
+    Start int64 // Start point
+    End int64 // End point
+}
+
+// InvalidRangeError returns an invalid range error
+func InvalidRangeError(r string) error {
+    return fmt.Errorf("InvalidRange %s", r)
+}
+
+// GetRangeString formats an UnpackedRange back into M-N form
+func GetRangeString(unpackRange UnpackedRange) string {
+    var strRange string
+    if unpackRange.HasStart && unpackRange.HasEnd {
+        strRange = fmt.Sprintf("%d-%d", unpackRange.Start, unpackRange.End)
+    } else if unpackRange.HasStart {
+        strRange = fmt.Sprintf("%d-", unpackRange.Start)
+    } else if unpackRange.HasEnd {
+        strRange = fmt.Sprintf("-%d", unpackRange.End)
+    }
+    return strRange
+}
+
+// ParseRange parses various styles of range such as bytes=M-N
+func ParseRange(normalizedRange string) (*UnpackedRange, error) {
+    var err error
+    hasStart := false
+    hasEnd := false
+    var start int64
+    var end int64
+
+    // bytes=M-N
+    nrSlice := strings.Split(normalizedRange, "=")
+    if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
+        return nil, InvalidRangeError(normalizedRange)
+    }
+
+    // bytes=M-N,X-Y: only the first range is used
+    rSlice := strings.Split(nrSlice[1], ",")
+    rStr := rSlice[0]
+
+    if strings.HasSuffix(rStr, "-") { // M-
+        startStr := rStr[:len(rStr)-1]
+        start, err = strconv.ParseInt(startStr, 10, 64)
+        if err != nil {
+            return nil, InvalidRangeError(normalizedRange)
+        }
+        hasStart = true
+    } else if strings.HasPrefix(rStr, "-") { // -N
+        endStr := rStr[1:]
+        end, err = strconv.ParseInt(endStr, 10, 64)
+        if err != nil {
+            return nil, InvalidRangeError(normalizedRange)
+        }
+        if end == 0 { // -0
+            return nil, InvalidRangeError(normalizedRange)
+        }
+        hasEnd = true
+    } else { // M-N
+        valSlice := strings.Split(rStr, "-")
+        if len(valSlice) != 2 {
+            return nil, InvalidRangeError(normalizedRange)
+        }
+        start, err = strconv.ParseInt(valSlice[0], 10, 64)
+        if err != nil {
+            return nil, InvalidRangeError(normalizedRange)
+        }
+        hasStart = true
+        end, err = strconv.ParseInt(valSlice[1], 10, 64)
+        if err != nil {
+            return nil, InvalidRangeError(normalizedRange)
+        }
+        hasEnd = true
+    }
+
+    return &UnpackedRange{hasStart, hasEnd, start, end}, nil
+}
+
+// AdjustRange clamps the range to the length of the file and returns the
+// adjusted start and end positions
+func AdjustRange(ur *UnpackedRange, size int64) (start, end int64) {
+    if ur == nil {
+        return 0, size
+    }
+
+    if ur.HasStart && ur.HasEnd {
+        start = ur.Start
+        end = ur.End + 1
+        if ur.Start < 0 || ur.Start >= size || ur.End > size || ur.Start > ur.End {
+            start = 0
+            end = size
+        }
+    } else if ur.HasStart {
+        start = ur.Start
+        end = size
+        if ur.Start < 0 || ur.Start >= size {
+            start = 0
+        }
+    } else if ur.HasEnd {
+        start = size - ur.End
+        end = size
+        if ur.End < 0 || ur.End > size {
+            start = 0
+            end = size
+        }
+    }
+    return
+}
+
+// GetNowSec returns the current Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func GetNowSec() int64 {
+    return time.Now().Unix()
+}
+
+// GetNowNanoSec returns the current Unix time in nanoseconds elapsed since
+// January 1, 1970 UTC. The result is undefined if the Unix time in
+// nanoseconds cannot be represented by an int64.
+func GetNowNanoSec() int64 {
+    return time.Now().UnixNano()
+}
+
+// GetNowGMT gets the current time in GMT format.
+func GetNowGMT() string {
+    return time.Now().UTC().Format(http.TimeFormat)
+}
+
+// FileChunk is the file chunk definition
+type FileChunk struct {
+    Number int // Chunk number
+    Offset int64 // Chunk offset
+    Size int64 // Chunk size
+}
+
+// SplitFileByPartNum splits a big file into chunks by the given number of
+// parts; it returns the split result when error is nil.
+func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
+    if chunkNum <= 0 || chunkNum > 10000 {
+        return nil, errors.New("oss: chunkNum invalid")
+    }
+
+    file, err := os.Open(fileName)
+    if err != nil {
+        return nil, err
+    }
+    defer file.Close()
+
+    stat, err := file.Stat()
+    if err != nil {
+        return nil, err
+    }
+
+    if int64(chunkNum) > stat.Size() {
+        return nil, errors.New("oss: chunkNum invalid")
+    }
+
+    var chunks []FileChunk
+    var chunk = FileChunk{}
+    var chunkN = (int64)(chunkNum)
+    for i := int64(0); i < chunkN; i++ {
+        chunk.Number = int(i + 1)
+        chunk.Offset = i * (stat.Size() / chunkN)
+        if i == chunkN-1 {
+            chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN
+        } else {
+            chunk.Size = stat.Size() / chunkN
+        }
+        chunks = append(chunks, chunk)
+    }
+
+    return chunks, nil
+}
+
+// SplitFileByPartSize splits a big file into chunks by the given part size;
+// it returns the FileChunk list when error is nil.
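+// At most 9,999 full-size chunks are produced; any remainder becomes one
+// final smaller chunk.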
+func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
+    if chunkSize <= 0 {
+        return nil, errors.New("chunkSize invalid")
+    }
+
+    file, err := os.Open(fileName)
+    if err != nil {
+        return nil, err
+    }
+    defer file.Close()
+
+    stat, err := file.Stat()
+    if err != nil {
+        return nil, err
+    }
+    var chunkN = stat.Size() / chunkSize
+    if chunkN >= 10000 {
+        return nil, errors.New("Too many parts, please increase part size")
+    }
+
+    var chunks []FileChunk
+    var chunk = FileChunk{}
+    for i := int64(0); i < chunkN; i++ {
+        chunk.Number = int(i + 1)
+        chunk.Offset = i * chunkSize
+        chunk.Size = chunkSize
+        chunks = append(chunks, chunk)
+    }
+
+    if stat.Size()%chunkSize > 0 {
+        chunk.Number = len(chunks) + 1
+        chunk.Offset = int64(len(chunks)) * chunkSize
+        chunk.Size = stat.Size() % chunkSize
+        chunks = append(chunks, chunk)
+    }
+
+    return chunks, nil
+}
+
+// GetPartEnd calculates the end position
+func GetPartEnd(begin int64, total int64, per int64) int64 {
+    if begin+per > total {
+        return total - 1
+    }
+    return begin + per - 1
+}
+
+// CrcTable returns the CRC64 table constructed from the ECMA polynomial
+var CrcTable = func() *crc64.Table {
+    return crc64.MakeTable(crc64.ECMA)
+}
+
+// crc32Table returns the CRC32 table constructed from the IEEE polynomial
+var crc32Table = func() *crc32.Table {
+    return crc32.MakeTable(crc32.IEEE)
+}
+
+// ChoiceTransferPartOption selects the options supported by UploadPart or DownloadPart
+func ChoiceTransferPartOption(options []Option) []Option {
+    var outOption []Option
+
+    listener, _ := FindOption(options, progressListener, nil)
+    if listener != nil {
+        outOption = append(outOption, Progress(listener.(ProgressListener)))
+    }
+
+    payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
+    if payer != nil {
+        outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
+    }
+
+    versionId, _ := FindOption(options, "versionId", nil)
+    if versionId != nil {
+        outOption = append(outOption, VersionId(versionId.(string)))
+    }
+
+    trafficLimit, _ := FindOption(options, HTTPHeaderOssTrafficLimit, nil)
+    if trafficLimit != nil {
+        speed, _ := strconv.ParseInt(trafficLimit.(string), 10, 64)
+        outOption = append(outOption, TrafficLimitHeader(speed))
+    }
+
+    respHeader, _ := FindOption(options, responseHeader, nil)
+    if respHeader != nil {
+        outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
+    }
+
+    return outOption
+}
+
+// ChoiceCompletePartOption selects the options supported by CompleteMultipartUpload
+func ChoiceCompletePartOption(options []Option) []Option {
+    var outOption []Option
+
+    listener, _ := FindOption(options, progressListener, nil)
+    if listener != nil {
+        outOption = append(outOption, Progress(listener.(ProgressListener)))
+    }
+
+    payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
+    if payer != nil {
+        outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
+    }
+
+    acl, _ := FindOption(options, HTTPHeaderOssObjectACL, nil)
+    if acl != nil {
+        outOption = append(outOption, ObjectACL(ACLType(acl.(string))))
+    }
+
+    callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
+    if callback != nil {
+        outOption = append(outOption, Callback(callback.(string)))
+    }
+
+    callbackVar, _ := FindOption(options, HTTPHeaderOssCallbackVar, nil)
+    if callbackVar != nil {
+        outOption = append(outOption, CallbackVar(callbackVar.(string)))
+    }
+
+    respHeader, _ := FindOption(options, responseHeader, nil)
+    if respHeader != nil {
+        outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
+    }
+
+    forbidOverWrite, _ := FindOption(options, HTTPHeaderOssForbidOverWrite, nil)
+    if forbidOverWrite != nil {
+        outOption = append(outOption, ForbidOverWrite(forbidOverWrite.(string) == "true"))
+    }
+
+    return outOption
+}
+
+// ChoiceAbortPartOption selects the options supported by AbortMultipartUpload
+func ChoiceAbortPartOption(options []Option) []Option {
+    var outOption []Option
+    payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
+    if payer != nil {
+        outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
+    }
+
+    respHeader, _ := FindOption(options, responseHeader, nil)
+    if respHeader != nil {
+        outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
+    }
+
+    return outOption
+}
+
+// ChoiceHeadObjectOption selects the options supported by HeadObject
+func ChoiceHeadObjectOption(options []Option) []Option {
+    var outOption []Option
+
+    // HTTPHeaderRange is deliberately not selected, so HeadObject reports the whole object's length
+    payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
+    if payer != nil {
+        outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
+    }
+
+    versionId, _ := FindOption(options, "versionId", nil)
+    if versionId != nil {
+        outOption = append(outOption, VersionId(versionId.(string)))
+    }
+
+    respHeader, _ := FindOption(options, responseHeader, nil)
+    if respHeader != nil {
+        outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
+    }
+
+    return outOption
+}
+
+// CheckBucketName validates a bucket name against the OSS naming rules
+func CheckBucketName(bucketName string) error {
+    nameLen := len(bucketName)
+    if nameLen < 3 || nameLen > 63 {
+        return fmt.Errorf("bucket name %s length must be between 3 and 63 characters, got %d", bucketName, nameLen)
+    }
+
+    for _, v := range bucketName {
+        if !(('a' <= v && v <= 'z') || ('0' <= v && v <= '9') || v == '-') {
+            return fmt.Errorf("bucket name %s can only include lowercase letters, numbers, and -", bucketName)
+        }
+    }
+    if bucketName[0] == '-' || bucketName[nameLen-1] == '-' {
+        return fmt.Errorf("bucket name %s must start and end with a lowercase letter or number", bucketName)
+    }
+    return nil
+}
+
+// GetReaderLen determines the content length of the supported reader types
+func GetReaderLen(reader io.Reader) (int64, error) {
+    var contentLength int64
+    var err error
+    switch v := reader.(type) {
+    case *bytes.Buffer:
+        contentLength = int64(v.Len())
+    case *bytes.Reader:
+        contentLength = int64(v.Len())
+    case *strings.Reader:
+        contentLength = int64(v.Len())
+    case *os.File:
+        fInfo, fError := v.Stat()
+        if fError != nil {
+            err = fmt.Errorf("can't get reader content length: %s", fError.Error())
+        } else {
+            contentLength = fInfo.Size()
+        }
+    case *io.LimitedReader:
+        contentLength = int64(v.N)
+    case *LimitedReadCloser:
+        contentLength = int64(v.N)
+    default:
+        err = fmt.Errorf("can't get reader content length: unknown reader type")
+    }
+    return contentLength, err
+}
+
+// LimitReadCloser wraps r in a LimitedReadCloser that stops after n bytes
+func LimitReadCloser(r io.Reader, n int64) io.Reader {
+    var lc LimitedReadCloser
+    lc.R = r
+    lc.N = n
+    return &lc
+}
+
+// LimitedReadCloser is an io.LimitedReader that also supports Close()
+type LimitedReadCloser struct {
+    io.LimitedReader
+}
+
+func (lc *LimitedReadCloser) Close() error {
+    if closer, ok := lc.R.(io.ReadCloser); ok {
+        return closer.Close()
+    }
+    return nil
+}
+
+// DiscardReadCloser drops the first Discard bytes read from RC
+type DiscardReadCloser struct {
+    RC io.ReadCloser
+    Discard int
+}
+
+func (drc *DiscardReadCloser) Read(b []byte) (int, error) {
+    n, err := drc.RC.Read(b)
+    if drc.Discard == 0 || n <= 0 {
+        return n, err
+    }
+
+    if n <= drc.Discard {
+        drc.Discard -= n
+        return 0, err
+    }
+
+    realLen := n - drc.Discard
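+    // Shift the bytes that survive the discard window to the front of b.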
+    copy(b[0:realLen], b[drc.Discard:n])
+    drc.Discard = 0
+    return realLen, err
+}
+
+func (drc *DiscardReadCloser) Close() error {
+    closer, ok := drc.RC.(io.ReadCloser)
+    if ok {
+        return closer.Close()
+    }
+    return nil
+}
+
+// ConvertEmptyValueToNil converts empty-string values in params to nil for the given keys
+func ConvertEmptyValueToNil(params map[string]interface{}, keys []string) {
+    for _, key := range keys {
+        value, ok := params[key]
+        if ok && value == "" {
+            // convert "" to nil
+            params[key] = nil
+        }
+    }
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index bd6d7bff09..55c617f93e 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -127,6 +127,9 @@ github.com/alicebob/miniredis/v2/geohash
 github.com/alicebob/miniredis/v2/hyperloglog
 github.com/alicebob/miniredis/v2/metro
 github.com/alicebob/miniredis/v2/server
+# github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible
+## explicit
+github.com/aliyun/aliyun-oss-go-sdk/oss
 # github.com/armon/go-metrics v0.4.0
 ## explicit; go 1.12
 github.com/armon/go-metrics
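For reviewers, a minimal sketch of how the vendored SDK's multipart upload helper is driven; the endpoint, credentials, bucket, and paths below are placeholders for illustration, not values used by Loki:

```go
package main

import (
	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint and credentials; real values come from configuration.
	client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	bucket, err := client.Bucket("my-bucket")
	if err != nil {
		panic(err)
	}

	// Multipart upload with 1 MiB parts, 3 concurrent workers, and a
	// checkpoint file so an interrupted upload can resume where it left off.
	err = bucket.UploadFile("chunks/example", "/tmp/example", 1024*1024,
		oss.Routines(3), oss.Checkpoint(true, "/tmp/example.cp"))
	if err != nil {
		panic(err)
	}
}
```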