mirror of https://github.com/grafana/loki
[new] chunk backend: Integrate Alibaba Cloud oss (#8410)
parent a5fbc7cff4
commit 1fc00fa02d
@@ -0,0 +1,17 @@
# This partial configuration uses Alibaba Cloud OSS for chunk storage.

schema_config:
  configs:
    - from: 2020-05-15
      object_store: alibabacloud
      schema: v11
      index:
        prefix: loki_index_
        period: 168h

storage_config:
  alibabacloud:
    bucket: <bucket>
    endpoint: <endpoint>
    access_key_id: <access_key_id>
    secret_access_key: <secret_access_key>
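The same values can also be supplied through the command-line flags the new client registers (see the Go file below): -oss.bucketname, -oss.endpoint, -oss.access-key-id and -oss.secret-access-key, each optionally prefixed with the caller-supplied prefix passed to RegisterFlagsWithPrefix.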
@@ -0,0 +1,164 @@
package alibaba

import (
    "context"
    "flag"
    "io"
    "net/http"
    "strconv"

    "github.com/aliyun/aliyun-oss-go-sdk/oss"
    "github.com/pkg/errors"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/weaveworks/common/instrument"

    "github.com/grafana/loki/pkg/storage/chunk/client"
)

const NoSuchKeyErr = "NoSuchKey"

var ossRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{
    Namespace: "loki",
    Name:      "oss_request_duration_seconds",
    Help:      "Time spent doing OSS requests.",
    Buckets:   prometheus.ExponentialBuckets(0.005, 4, 7),
}, []string{"operation", "status_code"}))

func init() {
    ossRequestDuration.Register()
}

type OssObjectClient struct {
    defaultBucket *oss.Bucket
}

// OssConfig is config for the OSS Chunk Client.
type OssConfig struct {
    Bucket          string `yaml:"bucket"`
    Endpoint        string `yaml:"endpoint"`
    AccessKeyID     string `yaml:"access_key_id"`
    SecretAccessKey string `yaml:"secret_access_key"`
}

// RegisterFlags registers flags.
func (cfg *OssConfig) RegisterFlags(f *flag.FlagSet) {
    cfg.RegisterFlagsWithPrefix("", f)
}

// RegisterFlagsWithPrefix registers flags with prefix.
func (cfg *OssConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
    f.StringVar(&cfg.Bucket, prefix+"oss.bucketname", "", "Name of OSS bucket.")
    f.StringVar(&cfg.Endpoint, prefix+"oss.endpoint", "", "oss Endpoint to connect to.")
    f.StringVar(&cfg.AccessKeyID, prefix+"oss.access-key-id", "", "alibabacloud Access Key ID")
    f.StringVar(&cfg.SecretAccessKey, prefix+"oss.secret-access-key", "", "alibabacloud Secret Access Key")
}

// NewOssObjectClient makes a new chunk.Client that writes chunks to OSS.
func NewOssObjectClient(ctx context.Context, cfg OssConfig) (client.ObjectClient, error) {
    client, err := oss.New(cfg.Endpoint, cfg.AccessKeyID, cfg.SecretAccessKey)
    if err != nil {
        return nil, err
    }
    bucket, err := client.Bucket(cfg.Bucket)
    if err != nil {
        return nil, err
    }
    return &OssObjectClient{
        defaultBucket: bucket,
    }, nil
}

func (s *OssObjectClient) Stop() {
}

// GetObject returns a reader and the size for the specified object key from the configured OSS bucket.
func (s *OssObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) {
    var resp *oss.GetObjectResult
    var options []oss.Option
    err := instrument.CollectedRequest(ctx, "OSS.GetObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
        var requestErr error
        resp, requestErr = s.defaultBucket.DoGetObject(&oss.GetObjectRequest{ObjectKey: objectKey}, options)
        if requestErr != nil {
            return requestErr
        }
        return nil
    })
    if err != nil {
        return nil, 0, err
    }
    length := resp.Response.Headers.Get("Content-Length")
    size, err := strconv.Atoi(length)
    if err != nil {
        return nil, 0, err
    }
    return resp.Response.Body, int64(size), err
}

// PutObject puts the specified bytes into the configured OSS bucket at the provided key.
func (s *OssObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
    return instrument.CollectedRequest(ctx, "OSS.PutObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
        if err := s.defaultBucket.PutObject(objectKey, object); err != nil {
            return errors.Wrap(err, "failed to put oss object")
        }
        return nil
    })
}

// List implements chunk.ObjectClient.
func (s *OssObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
    var storageObjects []client.StorageObject
    var commonPrefixes []client.StorageCommonPrefix
    marker := oss.Marker("")
    for {
        if ctx.Err() != nil {
            return nil, nil, ctx.Err()
        }

        objects, err := s.defaultBucket.ListObjects(oss.Prefix(prefix), oss.Delimiter(delimiter), marker)
        if err != nil {
            return nil, nil, errors.Wrap(err, "list alibaba oss bucket failed")
        }
        marker = oss.Marker(objects.NextMarker)
        for _, object := range objects.Objects {
            storageObjects = append(storageObjects, client.StorageObject{
                Key:        object.Key,
                ModifiedAt: object.LastModified,
            })
        }
        for _, object := range objects.CommonPrefixes {
            if object != "" {
                commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(object))
            }
        }
        if !objects.IsTruncated {
            break
        }
    }
    return storageObjects, commonPrefixes, nil
}

// DeleteObject deletes the specified object key from the configured OSS bucket.
func (s *OssObjectClient) DeleteObject(ctx context.Context, objectKey string) error {
    return instrument.CollectedRequest(ctx, "OSS.DeleteObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
        err := s.defaultBucket.DeleteObject(objectKey)
        if err != nil {
            return err
        }
        return nil
    })
}

// IsObjectNotFoundErr returns true if error means that object is not found. Relevant to GetObject and DeleteObject operations.
func (s *OssObjectClient) IsObjectNotFoundErr(err error) bool {
    switch caseErr := err.(type) {
    case oss.ServiceError:
        if caseErr.Code == NoSuchKeyErr && caseErr.StatusCode == http.StatusNotFound {
            return true
        }
        return false
    default:
        return false
    }
}
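A minimal usage sketch of the client added above. In Loki the client is wired up by the storage factory, so the direct construction here is illustrative only; the import path, bucket name, and endpoint are assumptions, not values taken from this diff.

package main

import (
    "bytes"
    "context"
    "fmt"

    "github.com/grafana/loki/pkg/storage/chunk/client/alibaba" // assumed package path
)

func main() {
    ctx := context.Background()
    c, err := alibaba.NewOssObjectClient(ctx, alibaba.OssConfig{
        Bucket:          "my-loki-chunks",               // hypothetical bucket
        Endpoint:        "oss-cn-hangzhou.aliyuncs.com", // example OSS endpoint
        AccessKeyID:     "<access_key_id>",
        SecretAccessKey: "<secret_access_key>",
    })
    if err != nil {
        panic(err)
    }
    defer c.Stop()

    // Store one object, then read it back.
    if err := c.PutObject(ctx, "example/key", bytes.NewReader([]byte("hello"))); err != nil {
        panic(err)
    }
    rc, size, err := c.GetObject(ctx, "example/key")
    if err != nil {
        panic(err)
    }
    defer rc.Close()
    fmt.Println("object size:", size)
}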
@@ -0,0 +1,14 @@
Copyright (c) 2015 aliyun.com

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,190 @@
package oss

import (
    "bytes"
    "crypto/hmac"
    "crypto/sha1"
    "crypto/sha256"
    "encoding/base64"
    "fmt"
    "hash"
    "io"
    "net/http"
    "sort"
    "strconv"
    "strings"
)

// headerSorter defines the key-value structure for storing the sorted data in signHeader.
type headerSorter struct {
    Keys []string
    Vals []string
}

// getAdditionalHeaderKeys gets the configured additional header keys that exist in the http header.
func (conn Conn) getAdditionalHeaderKeys(req *http.Request) ([]string, map[string]string) {
    var keysList []string
    keysMap := make(map[string]string)
    srcKeys := make(map[string]string)

    for k := range req.Header {
        srcKeys[strings.ToLower(k)] = ""
    }

    for _, v := range conn.config.AdditionalHeaders {
        if _, ok := srcKeys[strings.ToLower(v)]; ok {
            keysMap[strings.ToLower(v)] = ""
        }
    }

    for k := range keysMap {
        keysList = append(keysList, k)
    }
    sort.Strings(keysList)
    return keysList, keysMap
}

// signHeader signs the header and sets it as the authorization header.
func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
    akIf := conn.config.GetCredentials()
    authorizationStr := ""
    if conn.config.AuthVersion == AuthV2 {
        additionalList, _ := conn.getAdditionalHeaderKeys(req)
        if len(additionalList) > 0 {
            authorizationFmt := "OSS2 AccessKeyId:%v,AdditionalHeaders:%v,Signature:%v"
            additionnalHeadersStr := strings.Join(additionalList, ";")
            authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), additionnalHeadersStr, conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
        } else {
            authorizationFmt := "OSS2 AccessKeyId:%v,Signature:%v"
            authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
        }
    } else {
        // Get the final authorization string
        authorizationStr = "OSS " + akIf.GetAccessKeyID() + ":" + conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
    }

    // Give the parameter "Authorization" its value
    req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
}

func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string {
    // Find the "x-oss-" prefixed headers in the request
    ossHeadersMap := make(map[string]string)
    additionalList, additionalMap := conn.getAdditionalHeaderKeys(req)
    for k, v := range req.Header {
        if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
            ossHeadersMap[strings.ToLower(k)] = v[0]
        } else if conn.config.AuthVersion == AuthV2 {
            if _, ok := additionalMap[strings.ToLower(k)]; ok {
                ossHeadersMap[strings.ToLower(k)] = v[0]
            }
        }
    }
    hs := newHeaderSorter(ossHeadersMap)

    // Sort the ossHeadersMap in ascending order
    hs.Sort()

    // Get the canonicalizedOSSHeaders
    canonicalizedOSSHeaders := ""
    for i := range hs.Keys {
        canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
    }

    // Give the other parameters their values
    // when signing a URL, the date is the expiration time
    date := req.Header.Get(HTTPHeaderDate)
    contentType := req.Header.Get(HTTPHeaderContentType)
    contentMd5 := req.Header.Get(HTTPHeaderContentMD5)

    // default is v1 signature
    signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
    h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))

    // v2 signature
    if conn.config.AuthVersion == AuthV2 {
        signStr = req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + strings.Join(additionalList, ";") + "\n" + canonicalizedResource
        h = hmac.New(func() hash.Hash { return sha256.New() }, []byte(keySecret))
    }

    // Log the string to sign, with newlines escaped for readability
    if conn.config.LogLevel >= Debug {
        var signBuf bytes.Buffer
        for i := 0; i < len(signStr); i++ {
            if signStr[i] != '\n' {
                signBuf.WriteByte(signStr[i])
            } else {
                signBuf.WriteString("\\n")
            }
        }
        conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, signBuf.String())
    }

    io.WriteString(h, signStr)
    signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))

    return signedStr
}

func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, keySecret string, params map[string]interface{}) string {
    if params[HTTPParamAccessKeyID] == nil {
        return ""
    }

    canonResource := fmt.Sprintf("/%s/%s", bucketName, channelName)
    canonParamsKeys := []string{}
    for key := range params {
        if key != HTTPParamAccessKeyID && key != HTTPParamSignature && key != HTTPParamExpires && key != HTTPParamSecurityToken {
            canonParamsKeys = append(canonParamsKeys, key)
        }
    }

    sort.Strings(canonParamsKeys)
    canonParamsStr := ""
    for _, key := range canonParamsKeys {
        canonParamsStr = fmt.Sprintf("%s%s:%s\n", canonParamsStr, key, params[key].(string))
    }

    expireStr := strconv.FormatInt(expiration, 10)
    signStr := expireStr + "\n" + canonParamsStr + canonResource

    h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
    io.WriteString(h, signStr)
    signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
    return signedStr
}

// newHeaderSorter is an additional function for function SignHeader.
func newHeaderSorter(m map[string]string) *headerSorter {
    hs := &headerSorter{
        Keys: make([]string, 0, len(m)),
        Vals: make([]string, 0, len(m)),
    }

    for k, v := range m {
        hs.Keys = append(hs.Keys, k)
        hs.Vals = append(hs.Vals, v)
    }
    return hs
}

// Sort is an additional function for function SignHeader.
func (hs *headerSorter) Sort() {
    sort.Sort(hs)
}

// Len is an additional function for function SignHeader.
func (hs *headerSorter) Len() int {
    return len(hs.Vals)
}

// Less is an additional function for function SignHeader.
func (hs *headerSorter) Less(i, j int) bool {
    return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
}

// Swap is an additional function for function SignHeader.
func (hs *headerSorter) Swap(i, j int) {
    hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
    hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
}
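A standalone sketch of the v1 scheme getSignedStr implements above: HMAC-SHA1 over the method, Content-MD5, Content-Type, Date, the sorted "x-oss-" headers, and the canonicalized resource, then base64. The header values, resource, and secret below are illustrative only, not real credentials.

package main

import (
    "crypto/hmac"
    "crypto/sha1"
    "encoding/base64"
    "fmt"
)

func main() {
    keySecret := "examplesecret" // hypothetical AccessKeySecret
    // VERB\nContent-MD5\nContent-Type\nDate\nCanonicalizedOSSHeaders + CanonicalizedResource
    signStr := "GET\n\n\nWed, 15 Feb 2023 00:00:00 GMT\n" +
        "x-oss-security-token:token\n" + // sorted x-oss-* headers, one per line
        "/my-bucket/example/key"

    h := hmac.New(sha1.New, []byte(keySecret))
    h.Write([]byte(signStr))
    signature := base64.StdEncoding.EncodeToString(h.Sum(nil))
    fmt.Println("Authorization: OSS <AccessKeyId>:" + signature)
}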
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,207 @@
package oss

import (
    "bytes"
    "fmt"
    "log"
    "net"
    "os"
    "time"
)

// Define the levels of the output log
const (
    LogOff = iota
    Error
    Warn
    Info
    Debug
)

// LogTag is the tag for each level of log
var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"}

// HTTPTimeout defines HTTP timeout.
type HTTPTimeout struct {
    ConnectTimeout   time.Duration
    ReadWriteTimeout time.Duration
    HeaderTimeout    time.Duration
    LongTimeout      time.Duration
    IdleConnTimeout  time.Duration
}

// HTTPMaxConns defines max idle connections and max idle connections per host
type HTTPMaxConns struct {
    MaxIdleConns        int
    MaxIdleConnsPerHost int
}

// Credentials is the interface for getting AccessKeyID, AccessKeySecret, and SecurityToken
type Credentials interface {
    GetAccessKeyID() string
    GetAccessKeySecret() string
    GetSecurityToken() string
}

// CredentialsProvider is the interface for getting Credentials
type CredentialsProvider interface {
    GetCredentials() Credentials
}

type defaultCredentials struct {
    config *Config
}

func (defCre *defaultCredentials) GetAccessKeyID() string {
    return defCre.config.AccessKeyID
}

func (defCre *defaultCredentials) GetAccessKeySecret() string {
    return defCre.config.AccessKeySecret
}

func (defCre *defaultCredentials) GetSecurityToken() string {
    return defCre.config.SecurityToken
}

type defaultCredentialsProvider struct {
    config *Config
}

func (defBuild *defaultCredentialsProvider) GetCredentials() Credentials {
    return &defaultCredentials{config: defBuild.config}
}

// Config defines oss configuration
type Config struct {
    Endpoint            string              // OSS endpoint
    AccessKeyID         string              // AccessId
    AccessKeySecret     string              // AccessKey
    RetryTimes          uint                // Retry count; by default it's 5.
    UserAgent           string              // SDK name/version/system information
    IsDebug             bool                // Enable debug mode. Default is false.
    Timeout             uint                // Timeout in seconds. By default it's 60.
    SecurityToken       string              // STS Token
    IsCname             bool                // Whether the endpoint is a CNAME.
    HTTPTimeout         HTTPTimeout         // HTTP timeout
    HTTPMaxConns        HTTPMaxConns        // HTTP max connections
    IsUseProxy          bool                // Flag of using proxy.
    ProxyHost           string              // Proxy host.
    IsAuthProxy         bool                // Flag of needing proxy authentication.
    ProxyUser           string              // Proxy user
    ProxyPassword       string              // Proxy password
    IsEnableMD5         bool                // Flag of enabling MD5 for upload.
    MD5Threshold        int64               // Memory footprint threshold for each MD5 computation (16MB is the default), in bytes. When the data is larger than that, a temp file is used.
    IsEnableCRC         bool                // Flag of enabling CRC for upload.
    LogLevel            int                 // Log level
    Logger              *log.Logger         // For writing log
    UploadLimitSpeed    int                 // Upload limit speed in KB/s, 0 is unlimited
    UploadLimiter       *OssLimiter         // Bandwidth-limiting reader for upload
    DownloadLimitSpeed  int                 // Download limit speed in KB/s, 0 is unlimited
    DownloadLimiter     *OssLimiter         // Bandwidth-limiting reader for download
    CredentialsProvider CredentialsProvider // User-provided interface to get AccessKeyID, AccessKeySecret, SecurityToken
    LocalAddr           net.Addr            // Local client host info
    UserSetUa           bool                // Whether UserAgent is set by the user
    AuthVersion         AuthVersionType     // v1 or v2 signature; default is v1
    AdditionalHeaders   []string            // Special http headers that need to be signed
    RedirectEnabled     bool                // Only effective from go1.7 onward; enable http redirect or not
    InsecureSkipVerify  bool                // For https, whether to skip verifying the server certificate
}

// LimitUploadSpeed sets the upload speed limit in KB/s; 0 is unlimited and the default.
func (config *Config) LimitUploadSpeed(uploadSpeed int) error {
    if uploadSpeed < 0 {
        return fmt.Errorf("invalid argument, the value of uploadSpeed is less than 0")
    } else if uploadSpeed == 0 {
        config.UploadLimitSpeed = 0
        config.UploadLimiter = nil
        return nil
    }

    var err error
    config.UploadLimiter, err = GetOssLimiter(uploadSpeed)
    if err == nil {
        config.UploadLimitSpeed = uploadSpeed
    }
    return err
}

// LimitDownloadSpeed sets the download speed limit in KB/s; 0 is unlimited and the default.
func (config *Config) LimitDownloadSpeed(downloadSpeed int) error {
    if downloadSpeed < 0 {
        return fmt.Errorf("invalid argument, the value of downloadSpeed is less than 0")
    } else if downloadSpeed == 0 {
        config.DownloadLimitSpeed = 0
        config.DownloadLimiter = nil
        return nil
    }

    var err error
    config.DownloadLimiter, err = GetOssLimiter(downloadSpeed)
    if err == nil {
        config.DownloadLimitSpeed = downloadSpeed
    }
    return err
}

// WriteLog is the log output function
func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
    if config.LogLevel < LogLevel || config.Logger == nil {
        return
    }

    var logBuffer bytes.Buffer
    logBuffer.WriteString(LogTag[LogLevel-1])
    logBuffer.WriteString(fmt.Sprintf(format, a...))
    config.Logger.Printf("%s", logBuffer.String())
}

// GetCredentials gets the Credentials
func (config *Config) GetCredentials() Credentials {
    return config.CredentialsProvider.GetCredentials()
}

// getDefaultOssConfig gets the default configuration.
func getDefaultOssConfig() *Config {
    config := Config{}

    config.Endpoint = ""
    config.AccessKeyID = ""
    config.AccessKeySecret = ""
    config.RetryTimes = 5
    config.IsDebug = false
    config.UserAgent = userAgent()
    config.Timeout = 60 // Seconds
    config.SecurityToken = ""
    config.IsCname = false

    config.HTTPTimeout.ConnectTimeout = time.Second * 30   // 30s
    config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s
    config.HTTPTimeout.HeaderTimeout = time.Second * 60    // 60s
    config.HTTPTimeout.LongTimeout = time.Second * 300     // 300s
    config.HTTPTimeout.IdleConnTimeout = time.Second * 50  // 50s
    config.HTTPMaxConns.MaxIdleConns = 100
    config.HTTPMaxConns.MaxIdleConnsPerHost = 100

    config.IsUseProxy = false
    config.ProxyHost = ""
    config.IsAuthProxy = false
    config.ProxyUser = ""
    config.ProxyPassword = ""

    config.MD5Threshold = 16 * 1024 * 1024 // 16MB
    config.IsEnableMD5 = false
    config.IsEnableCRC = true

    config.LogLevel = LogOff
    config.Logger = log.New(os.Stdout, "", log.LstdFlags)

    provider := &defaultCredentialsProvider{config: &config}
    config.CredentialsProvider = provider

    config.AuthVersion = AuthV1
    config.RedirectEnabled = true
    config.InsecureSkipVerify = false

    return &config
}
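A minimal sketch of adjusting this configuration from client code. oss.New accepts functional options; Timeout and EnableCRC are existing options in this SDK, and LimitUploadSpeed is the *Config method shown above. Endpoint and credentials are placeholders.

package main

import (
    "log"

    "github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
    client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "<ak>", "<sk>",
        oss.Timeout(30, 120), // connect / read-write timeouts in seconds
        oss.EnableCRC(true),  // keep the CRC64 end-to-end check on
    )
    if err != nil {
        log.Fatal(err)
    }
    // Cap uploads at 1 MB/s (the limiter works in KB/s units).
    if err := client.Config.LimitUploadSpeed(1024); err != nil {
        log.Fatal(err)
    }
    _ = client
}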
@@ -0,0 +1,852 @@
package oss

import (
    "bytes"
    "crypto/md5"
    "encoding/base64"
    "encoding/json"
    "encoding/xml"
    "fmt"
    "hash"
    "io"
    "io/ioutil"
    "net"
    "net/http"
    "net/url"
    "os"
    "sort"
    "strconv"
    "strings"
    "time"
)

// Conn defines OSS Conn
type Conn struct {
    config *Config
    url    *urlMaker
    client *http.Client
}

var signKeyList = []string{"acl", "uploads", "location", "cors",
    "logging", "website", "referer", "lifecycle",
    "delete", "append", "tagging", "objectMeta",
    "uploadId", "partNumber", "security-token",
    "position", "img", "style", "styleName",
    "replication", "replicationProgress",
    "replicationLocation", "cname", "bucketInfo",
    "comp", "qos", "live", "status", "vod",
    "startTime", "endTime", "symlink",
    "x-oss-process", "response-content-type", "x-oss-traffic-limit",
    "response-content-language", "response-expires",
    "response-cache-control", "response-content-disposition",
    "response-content-encoding", "udf", "udfName", "udfImage",
    "udfId", "udfImageDesc", "udfApplication", "comp",
    "udfApplicationLog", "restore", "callback", "callback-var", "qosInfo",
    "policy", "stat", "encryption", "versions", "versioning", "versionId", "requestPayment",
    "x-oss-request-payer", "sequential",
    "inventory", "inventoryId", "continuation-token", "asyncFetch",
    "worm", "wormId", "wormExtend", "withHashContext",
    "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256",
    "x-oss-hash-ctx", "x-oss-md5-ctx", "transferAcceleration",
    "regionList",
}

// init initializes Conn
func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error {
    if client == nil {
        // New transport
        transport := newTransport(conn, config)

        // Proxy
        if conn.config.IsUseProxy {
            proxyURL, err := url.Parse(config.ProxyHost)
            if err != nil {
                return err
            }
            if config.IsAuthProxy {
                if config.ProxyPassword != "" {
                    proxyURL.User = url.UserPassword(config.ProxyUser, config.ProxyPassword)
                } else {
                    proxyURL.User = url.User(config.ProxyUser)
                }
            }
            transport.Proxy = http.ProxyURL(proxyURL)
        }
        client = &http.Client{Transport: transport}
        if !config.RedirectEnabled {
            disableHTTPRedirect(client)
        }
    }

    conn.config = config
    conn.url = urlMaker
    conn.client = client

    return nil
}

// Do sends the request and returns the response
func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
    data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
    urlParams := conn.getURLParams(params)
    subResource := conn.getSubResource(params)
    uri := conn.url.getURL(bucketName, objectName, urlParams)
    resource := conn.getResource(bucketName, objectName, subResource)
    return conn.doRequest(method, uri, resource, headers, data, initCRC, listener)
}

// DoURL sends the request with a signed URL and returns the response result.
func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string,
    data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
    // Get URI from signedURL
    uri, err := url.ParseRequestURI(signedURL)
    if err != nil {
        return nil, err
    }

    m := strings.ToUpper(string(method))
    req := &http.Request{
        Method:     m,
        URL:        uri,
        Proto:      "HTTP/1.1",
        ProtoMajor: 1,
        ProtoMinor: 1,
        Header:     make(http.Header),
        Host:       uri.Host,
    }

    tracker := &readerTracker{completedBytes: 0}
    fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
    if fd != nil {
        defer func() {
            fd.Close()
            os.Remove(fd.Name())
        }()
    }

    if conn.config.IsAuthProxy {
        auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
        basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
        req.Header.Set("Proxy-Authorization", basic)
    }

    req.Header.Set(HTTPHeaderHost, req.Host)
    req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)

    if headers != nil {
        for k, v := range headers {
            req.Header.Set(k, v)
        }
    }

    // Transfer started
    event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0)
    publishProgress(listener, event)

    if conn.config.LogLevel >= Debug {
        conn.LoggerHTTPReq(req)
    }

    resp, err := conn.client.Do(req)
    if err != nil {
        // Transfer failed
        event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0)
        publishProgress(listener, event)
        conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error())
        return nil, err
    }

    if conn.config.LogLevel >= Debug {
        // Print out the http response
        conn.LoggerHTTPResp(req, resp)
    }

    // Transfer completed
    event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0)
    publishProgress(listener, event)

    return conn.handleResponse(resp, crc)
}

func (conn Conn) getURLParams(params map[string]interface{}) string {
    // Sort
    keys := make([]string, 0, len(params))
    for k := range params {
        keys = append(keys, k)
    }
    sort.Strings(keys)

    // Serialize
    var buf bytes.Buffer
    for _, k := range keys {
        if buf.Len() > 0 {
            buf.WriteByte('&')
        }
        buf.WriteString(url.QueryEscape(k))
        if params[k] != nil && params[k].(string) != "" {
            buf.WriteString("=" + strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1))
        }
    }

    return buf.String()
}

func (conn Conn) getSubResource(params map[string]interface{}) string {
    // Sort
    keys := make([]string, 0, len(params))
    signParams := make(map[string]string)
    for k := range params {
        if conn.config.AuthVersion == AuthV2 {
            encodedKey := url.QueryEscape(k)
            keys = append(keys, encodedKey)
            if params[k] != nil && params[k] != "" {
                signParams[encodedKey] = strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1)
            }
        } else if conn.isParamSign(k) {
            keys = append(keys, k)
            if params[k] != nil {
                signParams[k] = params[k].(string)
            }
        }
    }
    sort.Strings(keys)

    // Serialize
    var buf bytes.Buffer
    for _, k := range keys {
        if buf.Len() > 0 {
            buf.WriteByte('&')
        }
        buf.WriteString(k)
        if _, ok := signParams[k]; ok {
            if signParams[k] != "" {
                buf.WriteString("=" + signParams[k])
            }
        }
    }
    return buf.String()
}

func (conn Conn) isParamSign(paramKey string) bool {
    for _, k := range signKeyList {
        if paramKey == k {
            return true
        }
    }
    return false
}

// getResource gets the canonicalized resource
func (conn Conn) getResource(bucketName, objectName, subResource string) string {
    if subResource != "" {
        subResource = "?" + subResource
    }
    if bucketName == "" {
        if conn.config.AuthVersion == AuthV2 {
            return url.QueryEscape("/") + subResource
        }
        return fmt.Sprintf("/%s%s", bucketName, subResource)
    }
    if conn.config.AuthVersion == AuthV2 {
        return url.QueryEscape("/"+bucketName+"/") + strings.Replace(url.QueryEscape(objectName), "+", "%20", -1) + subResource
    }
    return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
}

func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string,
    data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
    method = strings.ToUpper(method)
    req := &http.Request{
        Method:     method,
        URL:        uri,
        Proto:      "HTTP/1.1",
        ProtoMajor: 1,
        ProtoMinor: 1,
        Header:     make(http.Header),
        Host:       uri.Host,
    }

    tracker := &readerTracker{completedBytes: 0}
    fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
    if fd != nil {
        defer func() {
            fd.Close()
            os.Remove(fd.Name())
        }()
    }

    if conn.config.IsAuthProxy {
        auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
        basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
        req.Header.Set("Proxy-Authorization", basic)
    }

    date := time.Now().UTC().Format(http.TimeFormat)
    req.Header.Set(HTTPHeaderDate, date)
    req.Header.Set(HTTPHeaderHost, req.Host)
    req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)

    akIf := conn.config.GetCredentials()
    if akIf.GetSecurityToken() != "" {
        req.Header.Set(HTTPHeaderOssSecurityToken, akIf.GetSecurityToken())
    }

    if headers != nil {
        for k, v := range headers {
            req.Header.Set(k, v)
        }
    }

    conn.signHeader(req, canonicalizedResource)

    // Transfer started
    event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0)
    publishProgress(listener, event)

    if conn.config.LogLevel >= Debug {
        conn.LoggerHTTPReq(req)
    }

    resp, err := conn.client.Do(req)

    if err != nil {
        // Transfer failed
        event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0)
        publishProgress(listener, event)
        conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error())
        return nil, err
    }

    if conn.config.LogLevel >= Debug {
        // Print out the http response
        conn.LoggerHTTPResp(req, resp)
    }

    // Transfer completed
    event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0)
    publishProgress(listener, event)

    return conn.handleResponse(resp, crc)
}

func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string {
    akIf := conn.config.GetCredentials()
    if akIf.GetSecurityToken() != "" {
        params[HTTPParamSecurityToken] = akIf.GetSecurityToken()
    }

    m := strings.ToUpper(string(method))
    req := &http.Request{
        Method: m,
        Header: make(http.Header),
    }

    if conn.config.IsAuthProxy {
        auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
        basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
        req.Header.Set("Proxy-Authorization", basic)
    }

    req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10))
    req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)

    if headers != nil {
        for k, v := range headers {
            req.Header.Set(k, v)
        }
    }

    if conn.config.AuthVersion == AuthV2 {
        params[HTTPParamSignatureVersion] = "OSS2"
        params[HTTPParamExpiresV2] = strconv.FormatInt(expiration, 10)
        params[HTTPParamAccessKeyIDV2] = conn.config.AccessKeyID
        additionalList, _ := conn.getAdditionalHeaderKeys(req)
        if len(additionalList) > 0 {
            params[HTTPParamAdditionalHeadersV2] = strings.Join(additionalList, ";")
        }
    }

    subResource := conn.getSubResource(params)
    canonicalizedResource := conn.getResource(bucketName, objectName, subResource)
    signedStr := conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())

    if conn.config.AuthVersion == AuthV1 {
        params[HTTPParamExpires] = strconv.FormatInt(expiration, 10)
        params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID()
        params[HTTPParamSignature] = signedStr
    } else if conn.config.AuthVersion == AuthV2 {
        params[HTTPParamSignatureV2] = signedStr
    }
    urlParams := conn.getURLParams(params)
    return conn.url.getSignURL(bucketName, objectName, urlParams)
}

func (conn Conn) signRtmpURL(bucketName, channelName, playlistName string, expiration int64) string {
    params := map[string]interface{}{}
    if playlistName != "" {
        params[HTTPParamPlaylistName] = playlistName
    }
    expireStr := strconv.FormatInt(expiration, 10)
    params[HTTPParamExpires] = expireStr

    akIf := conn.config.GetCredentials()
    if akIf.GetAccessKeyID() != "" {
        params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID()
        if akIf.GetSecurityToken() != "" {
            params[HTTPParamSecurityToken] = akIf.GetSecurityToken()
        }
        signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, akIf.GetAccessKeySecret(), params)
        params[HTTPParamSignature] = signedStr
    }

    urlParams := conn.getURLParams(params)
    return conn.url.getSignRtmpURL(bucketName, channelName, urlParams)
}

// handleBody handles the request body
func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
    listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) {
    var file *os.File
    var crc hash.Hash64
    reader := body
    readerLen, err := GetReaderLen(reader)
    if err == nil {
        req.ContentLength = readerLen
    }
    req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10))

    // MD5
    if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" {
        md5 := ""
        reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold)
        req.Header.Set(HTTPHeaderContentMD5, md5)
    }

    // CRC
    if reader != nil && conn.config.IsEnableCRC {
        crc = NewCRC(CrcTable(), initCRC)
        reader = TeeReader(reader, crc, req.ContentLength, listener, tracker)
    }

    // HTTP body
    rc, ok := reader.(io.ReadCloser)
    if !ok && reader != nil {
        rc = ioutil.NopCloser(reader)
    }

    if conn.isUploadLimitReq(req) {
        limitReader := &LimitSpeedReader{
            reader:     rc,
            ossLimiter: conn.config.UploadLimiter,
        }
        req.Body = limitReader
    } else {
        req.Body = rc
    }
    return file, crc
}

// isUploadLimitReq judges whether to limit the upload speed for this request
func (conn Conn) isUploadLimitReq(req *http.Request) bool {
    if conn.config.UploadLimitSpeed == 0 || conn.config.UploadLimiter == nil {
        return false
    }

    if req.Method != "GET" && req.Method != "DELETE" && req.Method != "HEAD" {
        if req.ContentLength > 0 {
            return true
        }
    }
    return false
}

func tryGetFileSize(f *os.File) int64 {
    fInfo, _ := f.Stat()
    return fInfo.Size()
}

// handleResponse handles the response
func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) {
    var cliCRC uint64
    var srvCRC uint64

    statusCode := resp.StatusCode
    if statusCode/100 != 2 {
        if statusCode >= 400 && statusCode <= 505 {
            // 4xx and 5xx indicate that an error occurred during the operation
            var respBody []byte
            respBody, err := readResponseBody(resp)
            if err != nil {
                return nil, err
            }

            if len(respBody) == 0 {
                err = ServiceError{
                    StatusCode: statusCode,
                    RequestID:  resp.Header.Get(HTTPHeaderOssRequestID),
                }
            } else {
                // Response contains the storage service error object; unmarshal it
                srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
                    resp.Header.Get(HTTPHeaderOssRequestID))
                if errIn != nil { // error unmarshaling the error response
                    err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
                } else {
                    err = srvErr
                }
            }

            return &Response{
                StatusCode: resp.StatusCode,
                Headers:    resp.Header,
                Body:       ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
            }, err
        } else if statusCode >= 300 && statusCode <= 307 {
            // OSS uses 3xx, but the response has no body
            err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
            return &Response{
                StatusCode: resp.StatusCode,
                Headers:    resp.Header,
                Body:       resp.Body,
            }, err
        } else {
            // (0,300) [308,400) [506,)
            // Other extended http StatusCode
            var respBody []byte
            respBody, err := readResponseBody(resp)
            if err != nil {
                return &Response{StatusCode: resp.StatusCode, Headers: resp.Header, Body: ioutil.NopCloser(bytes.NewReader(respBody))}, err
            }

            if len(respBody) == 0 {
                err = ServiceError{
                    StatusCode: statusCode,
                    RequestID:  resp.Header.Get(HTTPHeaderOssRequestID),
                }
            } else {
                // Response contains the storage service error object; unmarshal it
                srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
                    resp.Header.Get(HTTPHeaderOssRequestID))
                if errIn != nil { // error unmarshaling the error response
                    err = fmt.Errorf("unknown response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
                } else {
                    err = srvErr
                }
            }

            return &Response{
                StatusCode: resp.StatusCode,
                Headers:    resp.Header,
                Body:       ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
            }, err
        }
    } else {
        if conn.config.IsEnableCRC && crc != nil {
            cliCRC = crc.Sum64()
        }
        srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64)

        realBody := resp.Body
        if conn.isDownloadLimitResponse(resp) {
            limitReader := &LimitSpeedReader{
                reader:     realBody,
                ossLimiter: conn.config.DownloadLimiter,
            }
            realBody = limitReader
        }

        // 2xx, successful
        return &Response{
            StatusCode: resp.StatusCode,
            Headers:    resp.Header,
            Body:       realBody,
            ClientCRC:  cliCRC,
            ServerCRC:  srvCRC,
        }, nil
    }
}

// isDownloadLimitResponse judges whether to limit the download speed for this response
func (conn Conn) isDownloadLimitResponse(resp *http.Response) bool {
    if resp == nil || conn.config.DownloadLimitSpeed == 0 || conn.config.DownloadLimiter == nil {
        return false
    }

    if strings.EqualFold(resp.Request.Method, "GET") {
        return true
    }
    return false
}

// LoggerHTTPReq prints the header information of the http request
func (conn Conn) LoggerHTTPReq(req *http.Request) {
    var logBuffer bytes.Buffer
    logBuffer.WriteString(fmt.Sprintf("[Req:%p]Method:%s\t", req, req.Method))
    logBuffer.WriteString(fmt.Sprintf("Host:%s\t", req.URL.Host))
    logBuffer.WriteString(fmt.Sprintf("Path:%s\t", req.URL.Path))
    logBuffer.WriteString(fmt.Sprintf("Query:%s\t", req.URL.RawQuery))
    logBuffer.WriteString("Header info:")

    for k, v := range req.Header {
        var valueBuffer bytes.Buffer
        for j := 0; j < len(v); j++ {
            if j > 0 {
                valueBuffer.WriteString(" ")
            }
            valueBuffer.WriteString(v[j])
        }
        logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String()))
    }
    conn.config.WriteLog(Debug, "%s\n", logBuffer.String())
}

// LoggerHTTPResp prints the response to the http request
func (conn Conn) LoggerHTTPResp(req *http.Request, resp *http.Response) {
    var logBuffer bytes.Buffer
    logBuffer.WriteString(fmt.Sprintf("[Resp:%p]StatusCode:%d\t", req, resp.StatusCode))
    logBuffer.WriteString("Header info:")
    for k, v := range resp.Header {
        var valueBuffer bytes.Buffer
        for j := 0; j < len(v); j++ {
            if j > 0 {
                valueBuffer.WriteString(" ")
            }
            valueBuffer.WriteString(v[j])
        }
        logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String()))
    }
    conn.config.WriteLog(Debug, "%s\n", logBuffer.String())
}

func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) {
    if contentLen == 0 || contentLen > md5Threshold {
        // Huge body, use a temporary file
        tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix)
        if tempFile != nil {
            io.Copy(tempFile, body)
            tempFile.Seek(0, os.SEEK_SET)
            md5 := md5.New()
            io.Copy(md5, tempFile)
            sum := md5.Sum(nil)
            b64 = base64.StdEncoding.EncodeToString(sum[:])
            tempFile.Seek(0, os.SEEK_SET)
            reader = tempFile
        }
    } else {
        // Small body, use memory
        buf, _ := ioutil.ReadAll(body)
        sum := md5.Sum(buf)
        b64 = base64.StdEncoding.EncodeToString(sum[:])
        reader = bytes.NewReader(buf)
    }
    return
}

func readResponseBody(resp *http.Response) ([]byte, error) {
    defer resp.Body.Close()
    out, err := ioutil.ReadAll(resp.Body)
    if err == io.EOF {
        err = nil
    }
    return out, err
}

func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
    var storageErr ServiceError

    if err := xml.Unmarshal(body, &storageErr); err != nil {
        return storageErr, err
    }

    storageErr.StatusCode = statusCode
    storageErr.RequestID = requestID
    storageErr.RawMessage = string(body)
    return storageErr, nil
}

func xmlUnmarshal(body io.Reader, v interface{}) error {
    data, err := ioutil.ReadAll(body)
    if err != nil {
        return err
    }
    return xml.Unmarshal(data, v)
}

func jsonUnmarshal(body io.Reader, v interface{}) error {
    data, err := ioutil.ReadAll(body)
    if err != nil {
        return err
    }
    return json.Unmarshal(data, v)
}

// timeoutConn handles HTTP timeout
type timeoutConn struct {
    conn        net.Conn
    timeout     time.Duration
    longTimeout time.Duration
}

func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn {
    conn.SetReadDeadline(time.Now().Add(longTimeout))
    return &timeoutConn{
        conn:        conn,
        timeout:     timeout,
        longTimeout: longTimeout,
    }
}

func (c *timeoutConn) Read(b []byte) (n int, err error) {
    c.SetReadDeadline(time.Now().Add(c.timeout))
    n, err = c.conn.Read(b)
    c.SetReadDeadline(time.Now().Add(c.longTimeout))
    return n, err
}

func (c *timeoutConn) Write(b []byte) (n int, err error) {
    c.SetWriteDeadline(time.Now().Add(c.timeout))
    n, err = c.conn.Write(b)
    c.SetReadDeadline(time.Now().Add(c.longTimeout))
    return n, err
}

func (c *timeoutConn) Close() error {
    return c.conn.Close()
}

func (c *timeoutConn) LocalAddr() net.Addr {
    return c.conn.LocalAddr()
}

func (c *timeoutConn) RemoteAddr() net.Addr {
    return c.conn.RemoteAddr()
}

func (c *timeoutConn) SetDeadline(t time.Time) error {
    return c.conn.SetDeadline(t)
}

func (c *timeoutConn) SetReadDeadline(t time.Time) error {
    return c.conn.SetReadDeadline(t)
}

func (c *timeoutConn) SetWriteDeadline(t time.Time) error {
    return c.conn.SetWriteDeadline(t)
}

// urlMaker builds the URL and resource
const (
    urlTypeCname  = 1
    urlTypeIP     = 2
    urlTypeAliyun = 3
)

type urlMaker struct {
    Scheme  string // HTTP or HTTPS
    NetLoc  string // Host or IP
    Type    int    // 1 CNAME, 2 IP, 3 ALIYUN
    IsProxy bool   // Proxy
}

// Init parses the endpoint
func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) error {
    if strings.HasPrefix(endpoint, "http://") {
        um.Scheme = "http"
        um.NetLoc = endpoint[len("http://"):]
    } else if strings.HasPrefix(endpoint, "https://") {
        um.Scheme = "https"
        um.NetLoc = endpoint[len("https://"):]
    } else {
        um.Scheme = "http"
        um.NetLoc = endpoint
    }

    // Use url.Parse() to get the real host
    strUrl := um.Scheme + "://" + um.NetLoc
    url, err := url.Parse(strUrl)
    if err != nil {
        return err
    }

    um.NetLoc = url.Host
    host, _, err := net.SplitHostPort(um.NetLoc)
    if err != nil {
        host = um.NetLoc
        if host[0] == '[' && host[len(host)-1] == ']' {
            host = host[1 : len(host)-1]
        }
    }

    ip := net.ParseIP(host)
    if ip != nil {
        um.Type = urlTypeIP
    } else if isCname {
        um.Type = urlTypeCname
    } else {
        um.Type = urlTypeAliyun
    }
    um.IsProxy = isProxy

    return nil
}

// getURL gets the URL
func (um urlMaker) getURL(bucket, object, params string) *url.URL {
    host, path := um.buildURL(bucket, object)
    addr := ""
    if params == "" {
        addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path)
    } else {
        addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
    }
    uri, _ := url.ParseRequestURI(addr)
    return uri
}

// getSignURL gets the signed URL
func (um urlMaker) getSignURL(bucket, object, params string) string {
    host, path := um.buildURL(bucket, object)
    return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
}

// getSignRtmpURL builds the signed RTMP URL
func (um urlMaker) getSignRtmpURL(bucket, channelName, params string) string {
    host, path := um.buildURL(bucket, "live")

    channelName = url.QueryEscape(channelName)
    channelName = strings.Replace(channelName, "+", "%20", -1)

    return fmt.Sprintf("rtmp://%s%s/%s?%s", host, path, channelName, params)
}

// buildURL builds the URL
func (um urlMaker) buildURL(bucket, object string) (string, string) {
    var host = ""
    var path = ""

    object = url.QueryEscape(object)
    object = strings.Replace(object, "+", "%20", -1)

    if um.Type == urlTypeCname {
        host = um.NetLoc
        path = "/" + object
    } else if um.Type == urlTypeIP {
        if bucket == "" {
            host = um.NetLoc
            path = "/"
        } else {
            host = um.NetLoc
            path = fmt.Sprintf("/%s/%s", bucket, object)
        }
    } else {
        if bucket == "" {
            host = um.NetLoc
            path = "/"
        } else {
            host = bucket + "." + um.NetLoc
            path = "/" + object
        }
    }

    return host, path
}
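A test-style sketch of what buildURL above produces for the three endpoint types: a plain OSS endpoint yields virtual-hosted style (bucket.host), an IP endpoint yields path style (/bucket/object), and a CNAME maps straight to the object path. It would have to live inside package oss since urlMaker is unexported; the endpoints are hypothetical and the printed URLs in the comments are the expected approximate output.

package oss

import "fmt"

func ExampleBuildURL() {
    var um urlMaker

    um.Init("oss-cn-hangzhou.aliyuncs.com", false, false) // aliyun endpoint: virtual-hosted style
    fmt.Println(um.getURL("mybucket", "a/b.txt", ""))
    // http://mybucket.oss-cn-hangzhou.aliyuncs.com/a%2Fb.txt

    um.Init("10.0.0.1:8080", false, false) // IP endpoint: path style
    fmt.Println(um.getURL("mybucket", "a/b.txt", ""))
    // http://10.0.0.1:8080/mybucket/a%2Fb.txt

    um.Init("files.example.com", true, false) // CNAME: bucket implied by host
    fmt.Println(um.getURL("mybucket", "a/b.txt", ""))
    // http://files.example.com/a%2Fb.txt
}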
@ -0,0 +1,258 @@ |
||||
package oss |
||||
|
||||
import "os" |
||||
|
||||
// ACLType bucket/object ACL
|
||||
type ACLType string |
||||
|
||||
const ( |
||||
// ACLPrivate definition : private read and write
|
||||
ACLPrivate ACLType = "private" |
||||
|
||||
// ACLPublicRead definition : public read and private write
|
||||
ACLPublicRead ACLType = "public-read" |
||||
|
||||
// ACLPublicReadWrite definition : public read and public write
|
||||
ACLPublicReadWrite ACLType = "public-read-write" |
||||
|
||||
// ACLDefault Object. It's only applicable for object.
|
||||
ACLDefault ACLType = "default" |
||||
) |
||||
|
||||
// bucket versioning status
|
||||
type VersioningStatus string |
||||
|
||||
const ( |
||||
// Versioning Status definition: Enabled
|
||||
VersionEnabled VersioningStatus = "Enabled" |
||||
|
||||
// Versioning Status definition: Suspended
|
||||
VersionSuspended VersioningStatus = "Suspended" |
||||
) |
||||
|
||||
// MetadataDirectiveType specifying whether use the metadata of source object when copying object.
|
||||
type MetadataDirectiveType string |
||||
|
||||
const ( |
||||
// MetaCopy the target object's metadata is copied from the source one
|
||||
MetaCopy MetadataDirectiveType = "COPY" |
||||
|
||||
// MetaReplace the target object's metadata is created as part of the copy request (not same as the source one)
|
||||
MetaReplace MetadataDirectiveType = "REPLACE" |
||||
) |
||||
|
||||
// TaggingDirectiveType specifying whether use the tagging of source object when copying object.
|
||||
type TaggingDirectiveType string |
||||
|
||||
const ( |
||||
// TaggingCopy the target object's tagging is copied from the source one
|
||||
TaggingCopy TaggingDirectiveType = "COPY" |
||||
|
||||
// TaggingReplace the target object's tagging is created as part of the copy request (not same as the source one)
|
||||
TaggingReplace TaggingDirectiveType = "REPLACE" |
||||
) |
||||
|
||||
// AlgorithmType specifying the server side encryption algorithm name
|
||||
type AlgorithmType string |
||||
|
||||
const ( |
||||
KMSAlgorithm AlgorithmType = "KMS" |
||||
AESAlgorithm AlgorithmType = "AES256" |
||||
SM4Algorithm AlgorithmType = "SM4" |
||||
) |
||||
|
||||
// StorageClassType bucket storage type
|
||||
type StorageClassType string |
||||
|
||||
const ( |
||||
// StorageStandard standard
|
||||
StorageStandard StorageClassType = "Standard" |
||||
|
||||
// StorageIA infrequent access
|
||||
StorageIA StorageClassType = "IA" |
||||
|
||||
// StorageArchive archive
|
||||
StorageArchive StorageClassType = "Archive" |
||||
|
||||
// StorageColdArchive cold archive
|
||||
StorageColdArchive StorageClassType = "ColdArchive" |
||||
) |
||||
|
||||
//RedundancyType bucket data Redundancy type
|
||||
type DataRedundancyType string |
||||
|
||||
const ( |
||||
// RedundancyLRS Local redundancy, default value
|
||||
RedundancyLRS DataRedundancyType = "LRS" |
||||
|
||||
// RedundancyZRS Same city redundancy
|
||||
RedundancyZRS DataRedundancyType = "ZRS" |
||||
) |
||||
|
||||
//ObjecthashFuncType
|
||||
type ObjecthashFuncType string |
||||
|
||||
const ( |
||||
HashFuncSha1 ObjecthashFuncType = "SHA-1" |
||||
HashFuncSha256 ObjecthashFuncType = "SHA-256" |
||||
) |
||||
|
||||
// PayerType the type of request payer
|
||||
type PayerType string |
||||
|
||||
const ( |
||||
// Requester the requester who send the request
|
||||
Requester PayerType = "Requester" |
||||
|
||||
// BucketOwner the requester who send the request
|
||||
BucketOwner PayerType = "BucketOwner" |
||||
) |
||||
|
||||
//RestoreMode the restore mode for coldArchive object
|
||||
type RestoreMode string |
||||
|
||||
const ( |
||||
//RestoreExpedited object will be restored in 1 hour
|
||||
RestoreExpedited RestoreMode = "Expedited" |
||||
|
||||
//RestoreStandard object will be restored in 2-5 hours
|
||||
RestoreStandard RestoreMode = "Standard" |
||||
|
||||
//RestoreBulk object will be restored in 5-10 hours
|
||||
RestoreBulk RestoreMode = "Bulk" |
||||
) |
||||
|
||||

// HTTPMethod HTTP request method
type HTTPMethod string

const (
	// HTTPGet HTTP GET
	HTTPGet HTTPMethod = "GET"

	// HTTPPut HTTP PUT
	HTTPPut HTTPMethod = "PUT"

	// HTTPHead HTTP HEAD
	HTTPHead HTTPMethod = "HEAD"

	// HTTPPost HTTP POST
	HTTPPost HTTPMethod = "POST"

	// HTTPDelete HTTP DELETE
	HTTPDelete HTTPMethod = "DELETE"
)

// HTTP headers
const (
	HTTPHeaderAcceptEncoding     string = "Accept-Encoding"
	HTTPHeaderAuthorization             = "Authorization"
	HTTPHeaderCacheControl              = "Cache-Control"
	HTTPHeaderContentDisposition        = "Content-Disposition"
	HTTPHeaderContentEncoding           = "Content-Encoding"
	HTTPHeaderContentLength             = "Content-Length"
	HTTPHeaderContentMD5                = "Content-MD5"
	HTTPHeaderContentType               = "Content-Type"
	HTTPHeaderContentLanguage           = "Content-Language"
	HTTPHeaderDate                      = "Date"
	HTTPHeaderEtag                      = "ETag"
	HTTPHeaderExpires                   = "Expires"
	HTTPHeaderHost                      = "Host"
	HTTPHeaderLastModified              = "Last-Modified"
	HTTPHeaderRange                     = "Range"
	HTTPHeaderLocation                  = "Location"
	HTTPHeaderOrigin                    = "Origin"
	HTTPHeaderServer                    = "Server"
	HTTPHeaderUserAgent                 = "User-Agent"
	HTTPHeaderIfModifiedSince           = "If-Modified-Since"
	HTTPHeaderIfUnmodifiedSince         = "If-Unmodified-Since"
	HTTPHeaderIfMatch                   = "If-Match"
	HTTPHeaderIfNoneMatch               = "If-None-Match"
	HTTPHeaderACReqMethod               = "Access-Control-Request-Method"
	HTTPHeaderACReqHeaders              = "Access-Control-Request-Headers"

	HTTPHeaderOssACL                         = "X-Oss-Acl"
	HTTPHeaderOssMetaPrefix                  = "X-Oss-Meta-"
	HTTPHeaderOssObjectACL                   = "X-Oss-Object-Acl"
	HTTPHeaderOssSecurityToken               = "X-Oss-Security-Token"
	HTTPHeaderOssServerSideEncryption        = "X-Oss-Server-Side-Encryption"
	HTTPHeaderOssServerSideEncryptionKeyID   = "X-Oss-Server-Side-Encryption-Key-Id"
	HTTPHeaderOssServerSideDataEncryption    = "X-Oss-Server-Side-Data-Encryption"
	HTTPHeaderSSECAlgorithm                  = "X-Oss-Server-Side-Encryption-Customer-Algorithm"
	HTTPHeaderSSECKey                        = "X-Oss-Server-Side-Encryption-Customer-Key"
	HTTPHeaderSSECKeyMd5                     = "X-Oss-Server-Side-Encryption-Customer-Key-MD5"
	HTTPHeaderOssCopySource                  = "X-Oss-Copy-Source"
	HTTPHeaderOssCopySourceRange             = "X-Oss-Copy-Source-Range"
	HTTPHeaderOssCopySourceIfMatch           = "X-Oss-Copy-Source-If-Match"
	HTTPHeaderOssCopySourceIfNoneMatch       = "X-Oss-Copy-Source-If-None-Match"
	HTTPHeaderOssCopySourceIfModifiedSince   = "X-Oss-Copy-Source-If-Modified-Since"
	HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since"
	HTTPHeaderOssMetadataDirective           = "X-Oss-Metadata-Directive"
	HTTPHeaderOssNextAppendPosition          = "X-Oss-Next-Append-Position"
	HTTPHeaderOssRequestID                   = "X-Oss-Request-Id"
	HTTPHeaderOssCRC64                       = "X-Oss-Hash-Crc64ecma"
	HTTPHeaderOssSymlinkTarget               = "X-Oss-Symlink-Target"
	HTTPHeaderOssStorageClass                = "X-Oss-Storage-Class"
	HTTPHeaderOssCallback                    = "X-Oss-Callback"
	HTTPHeaderOssCallbackVar                 = "X-Oss-Callback-Var"
	HTTPHeaderOssRequester                   = "X-Oss-Request-Payer"
	HTTPHeaderOssTagging                     = "X-Oss-Tagging"
	HTTPHeaderOssTaggingDirective            = "X-Oss-Tagging-Directive"
	HTTPHeaderOssTrafficLimit                = "X-Oss-Traffic-Limit"
	HTTPHeaderOssForbidOverWrite             = "X-Oss-Forbid-Overwrite"
	HTTPHeaderOssRangeBehavior               = "X-Oss-Range-Behavior"
	HTTPHeaderOssTaskID                      = "X-Oss-Task-Id"
	HTTPHeaderOssHashCtx                     = "X-Oss-Hash-Ctx"
	HTTPHeaderOssMd5Ctx                      = "X-Oss-Md5-Ctx"
	HTTPHeaderAllowSameActionOverLap         = "X-Oss-Allow-Same-Action-Overlap"
)

// HTTP Param
const (
	HTTPParamExpires       = "Expires"
	HTTPParamAccessKeyID   = "OSSAccessKeyId"
	HTTPParamSignature     = "Signature"
	HTTPParamSecurityToken = "security-token"
	HTTPParamPlaylistName  = "playlistName"

	HTTPParamSignatureVersion    = "x-oss-signature-version"
	HTTPParamExpiresV2           = "x-oss-expires"
	HTTPParamAccessKeyIDV2       = "x-oss-access-key-id"
	HTTPParamSignatureV2         = "x-oss-signature"
	HTTPParamAdditionalHeadersV2 = "x-oss-additional-headers"
)

// Other constants
const (
	MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB
	MinPartSize = 100 * 1024             // Min part size, 100KB

	FilePermMode = os.FileMode(0664) // Default file permission

	TempFilePrefix = "oss-go-temp-" // Temp file prefix
	TempFileSuffix = ".temp"        // Temp file suffix

	CheckpointFileSuffix = ".cp" // Checkpoint file suffix

	NullVersion = "null"

	Version = "v2.2.2" // Go SDK version
)

// FrameType the frame type in a select object response
const (
	DataFrameType        = 8388609
	ContinuousFrameType  = 8388612
	EndFrameType         = 8388613
	MetaEndFrameCSVType  = 8388614
	MetaEndFrameJSONType = 8388615
)

// AuthVersionType the version of auth
type AuthVersionType string

const (
	// AuthV1 v1
	AuthV1 AuthVersionType = "v1"
	// AuthV2 v2
	AuthV2 AuthVersionType = "v2"
)
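
The typed constants above are consumed through the SDK's functional options. A rough caller-side sketch (the `ObjectStorageClass` and `RequestPayer` option helpers are defined elsewhere in this SDK, and the `client`/`strings` setup is assumed): writing an Archive-tier object under requester-pays could look like this.

	bucket, err := client.Bucket("example-bucket")
	if err != nil {
		return err
	}
	// Upload into the Archive storage class; the requester pays for the request.
	err = bucket.PutObject("backup/2023.tar.gz", strings.NewReader("payload"),
		oss.ObjectStorageClass(oss.StorageArchive),
		oss.RequestPayer(oss.Requester))
	if err != nil {
		return err
	}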

@ -0,0 +1,123 @@
package oss

import (
	"hash"
	"hash/crc64"
)

// digest represents the partial evaluation of a checksum.
type digest struct {
	crc uint64
	tab *crc64.Table
}

// NewCRC creates a new hash.Hash64 computing the CRC64 checksum
// using the polynomial represented by the Table.
func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }

// Size returns the number of bytes Sum will return.
func (d *digest) Size() int { return crc64.Size }

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int { return 1 }

// Reset resets the hash to its initial state.
// Note that it clears the running CRC to zero, not to the init value
// that was passed to NewCRC.
func (d *digest) Reset() { d.crc = 0 }

// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (d *digest) Write(p []byte) (n int, err error) {
	d.crc = crc64.Update(d.crc, d.tab, p)
	return len(p), nil
}

// Sum64 returns the CRC64 value.
func (d *digest) Sum64() uint64 { return d.crc }

// Sum appends the big-endian CRC64 value to in and returns the resulting slice.
func (d *digest) Sum(in []byte) []byte {
	s := d.Sum64()
	return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}

// gf2Dim is the dimension of the GF(2) vectors (length of the CRC).
const gf2Dim int = 64

func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
	var sum uint64
	for i := 0; vec != 0; i++ {
		if vec&1 != 0 {
			sum ^= mat[i]
		}
		vec >>= 1
	}
	return sum
}

func gf2MatrixSquare(square []uint64, mat []uint64) {
	for n := 0; n < gf2Dim; n++ {
		square[n] = gf2MatrixTimes(mat, mat[n])
	}
}

// CRC64Combine combines two CRC64 values: crc1 covers the first block,
// crc2 the second, and len2 is the byte length of the second block.
func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 {
	var even [gf2Dim]uint64 // Even-power-of-two zeros operator
	var odd [gf2Dim]uint64  // Odd-power-of-two zeros operator

	// Degenerate case
	if len2 == 0 {
		return crc1
	}

	// Put operator for one zero bit in odd
	odd[0] = crc64.ECMA // CRC64 polynomial
	var row uint64 = 1
	for n := 1; n < gf2Dim; n++ {
		odd[n] = row
		row <<= 1
	}

	// Put operator for two zero bits in even
	gf2MatrixSquare(even[:], odd[:])

	// Put operator for four zero bits in odd
	gf2MatrixSquare(odd[:], even[:])

	// Apply len2 zeros to crc1 (the first square puts the operator for one
	// zero byte, eight zero bits, in even)
	for {
		// Apply zeros operator for this bit of len2
		gf2MatrixSquare(even[:], odd[:])
		if len2&1 != 0 {
			crc1 = gf2MatrixTimes(even[:], crc1)
		}
		len2 >>= 1

		// If no more bits set, then done
		if len2 == 0 {
			break
		}

		// Another iteration of the loop with odd and even swapped
		gf2MatrixSquare(odd[:], even[:])
		if len2&1 != 0 {
			crc1 = gf2MatrixTimes(odd[:], crc1)
		}
		len2 >>= 1

		// If no more bits set, then done
		if len2 == 0 {
			break
		}
	}

	// Return combined CRC
	crc1 ^= crc2
	return crc1
}
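
The GF(2) matrix trick above has one property worth verifying: combining the CRCs of two adjacent blocks must equal the CRC of their concatenation. A test-shaped sketch in the same package (imports "hash/crc64" and "testing") checks it against the standard library:

	func TestCRC64Combine(t *testing.T) {
		table := crc64.MakeTable(crc64.ECMA)
		a, b := []byte("hello "), []byte("world")

		// CRC(a) combined with CRC(b) over len(b) bytes == CRC(a||b).
		combined := CRC64Combine(crc64.Checksum(a, table), crc64.Checksum(b, table), uint64(len(b)))
		whole := crc64.Checksum([]byte("hello world"), table)
		if combined != whole {
			t.Fatalf("combine mismatch: %x != %x", combined, whole)
		}
	}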

@ -0,0 +1,567 @@
package oss

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"hash"
	"hash/crc64"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"time"
)

// DownloadFile downloads a file with multipart download.
//
// objectKey    the object key.
// filePath     the local file to download objectKey into.
// partSize     the part size in bytes.
// options      the object's constraints; check out GetObject for the reference.
//
// error        it's nil when the call succeeds, otherwise it's an error object.
//
func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
	if partSize < 1 {
		return errors.New("oss: part size smaller than 1")
	}

	uRange, err := GetRangeConfig(options)
	if err != nil {
		return err
	}

	cpConf := getCpConfig(options)
	routines := getRoutines(options)

	var strVersionId string
	versionId, _ := FindOption(options, "versionId", nil)
	if versionId != nil {
		strVersionId = versionId.(string)
	}

	if cpConf != nil && cpConf.IsEnable {
		cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, strVersionId, filePath)
		if cpFilePath != "" {
			return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
		}
	}

	return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
}
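
A caller-side usage sketch (the `Routines` and `Checkpoint` option helpers are defined elsewhere in this SDK): five-megabyte parts, three concurrent workers, and a checkpoint file so an interrupted download resumes instead of restarting.

	err := bucket.DownloadFile("chunks/big-object", "/tmp/big-object", 5*1024*1024,
		oss.Routines(3),
		oss.Checkpoint(true, "/tmp/big-object.cp"))
	if err != nil {
		return err
	}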

func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, versionId, destFile string) string {
	if cpConf.FilePath == "" && cpConf.DirPath != "" {
		src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
		absPath, _ := filepath.Abs(destFile)
		cpFileName := getCpFileName(src, absPath, versionId)
		cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
	}
	return cpConf.FilePath
}

// downloadWorkerArg holds a download worker's parameters.
type downloadWorkerArg struct {
	bucket    *Bucket
	key       string
	filePath  string
	options   []Option
	hook      downloadPartHook
	enableCRC bool
}

// downloadPartHook is a hook for testing.
type downloadPartHook func(part downloadPart) error

var downloadPartHooker downloadPartHook = defaultDownloadPartHook

func defaultDownloadPartHook(part downloadPart) error {
	return nil
}

// defaultDownloadProgressListener defines the default ProgressListener; it shields the ProgressListener in the options of GetObject.
type defaultDownloadProgressListener struct {
}

// ProgressChanged is a no-op.
func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) {
}

// downloadWorker pulls parts off the jobs channel, fetches each range with
// GetObject and writes it at the right offset of the local file.
func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) {
	for part := range jobs {
		if err := arg.hook(part); err != nil {
			failed <- err
			break
		}

		// Resolve options
		r := Range(part.Start, part.End)
		p := Progress(&defaultDownloadProgressListener{})

		var respHeader http.Header
		// Pre-allocate capacity only; appending to a non-zero-length slice
		// would leave nil options at the front.
		opts := make([]Option, 0, len(arg.options)+3)
		// Append in order; the sequence must not be reversed!
		opts = append(opts, arg.options...)
		opts = append(opts, r, p, GetResponseHeader(&respHeader))

		rd, err := arg.bucket.GetObject(arg.key, opts...)
		if err != nil {
			failed <- err
			break
		}
		// The close is deferred inside the loop, so it only runs when the worker returns.
		defer rd.Close()

		var crcCalc hash.Hash64
		if arg.enableCRC {
			crcCalc = crc64.New(CrcTable())
			contentLen := part.End - part.Start + 1
			rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil))
		}

		select {
		case <-die:
			return
		default:
		}

		fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode)
		if err != nil {
			failed <- err
			break
		}

		// os.SEEK_SET is kept for pre-go1.7 compatibility (io.SeekStart landed in go1.7).
		_, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET)
		if err != nil {
			fd.Close()
			failed <- err
			break
		}

		startT := time.Now().Unix()
		_, err = io.Copy(fd, rd)
		endT := time.Now().Unix()
		if err != nil {
			arg.bucket.Client.Config.WriteLog(Debug, "download part error,cost:%d second,part number:%d,request id:%s,error:%s.\n", endT-startT, part.Index, GetRequestId(respHeader), err.Error())
			fd.Close()
			failed <- err
			break
		}

		if arg.enableCRC {
			part.CRC64 = crcCalc.Sum64()
		}

		fd.Close()
		results <- part
	}
}

// downloadScheduler feeds all parts into the jobs channel and closes it.
func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
	for _, part := range parts {
		jobs <- part
	}
	close(jobs)
}

// downloadPart defines a download part.
type downloadPart struct {
	Index  int    // Part number, starting from 0
	Start  int64  // Start index
	End    int64  // End index
	Offset int64  // Offset
	CRC64  uint64 // CRC check value of the part
}

// getDownloadParts splits the requested range into parts of at most partSize bytes.
func getDownloadParts(objectSize, partSize int64, uRange *UnpackedRange) []downloadPart {
	parts := []downloadPart{}
	part := downloadPart{}
	i := 0
	start, end := AdjustRange(uRange, objectSize)
	for offset := start; offset < end; offset += partSize {
		part.Index = i
		part.Start = offset
		part.End = GetPartEnd(offset, end, partSize)
		part.Offset = start
		part.CRC64 = 0
		parts = append(parts, part)
		i++
	}
	return parts
}
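
For intuition on the split: a whole-object download of a 10-byte object with partSize 4 yields the inclusive ranges [0,3], [4,7], [8,9]. A standalone sketch of the same arithmetic, without the SDK types (imports "fmt"):

	func exampleParts() {
		objectSize, partSize := int64(10), int64(4)
		for offset := int64(0); offset < objectSize; offset += partSize {
			// Inclusive end of this part, clamped to the object size.
			end := offset + partSize - 1
			if end >= objectSize {
				end = objectSize - 1
			}
			fmt.Printf("part [%d, %d]\n", offset, end) // [0,3] [4,7] [8,9]
		}
	}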

// getObjectBytes gets the total byte length of the parts.
func getObjectBytes(parts []downloadPart) int64 {
	var ob int64
	for _, part := range parts {
		ob += part.End - part.Start + 1
	}
	return ob
}

// combineCRCInParts calculates the total CRC of continuous parts.
func combineCRCInParts(dps []downloadPart) uint64 {
	if len(dps) == 0 {
		return 0
	}

	crc := dps[0].CRC64
	for i := 1; i < len(dps); i++ {
		crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1))
	}

	return crc
}

// downloadFile downloads a file concurrently without checkpoint.
func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *UnpackedRange) error {
	tempFilePath := filePath + TempFileSuffix
	listener := GetProgressListener(options)

	// If the file does not exist, create one. If it exists, the download will overwrite it.
	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
	if err != nil {
		return err
	}
	fd.Close()

	// Get the object's detailed meta for the whole object size.
	// The Range header must be deleted to get the whole object size.
	skipOptions := DeleteOption(options, HTTPHeaderRange)
	meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
	if err != nil {
		return err
	}

	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
	if err != nil {
		return err
	}

	enableCRC := false
	expectedCRC := (uint64)(0)
	if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
		if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
			enableCRC = true
			expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
		}
	}

	// Get the parts of the file
	parts := getDownloadParts(objectSize, partSize, uRange)
	jobs := make(chan downloadPart, len(parts))
	results := make(chan downloadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	var completedBytes int64
	totalBytes := getObjectBytes(parts)
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
	publishProgress(listener, event)

	// Start the download workers
	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
	for w := 1; w <= routines; w++ {
		go downloadWorker(w, arg, jobs, results, failed, die)
	}

	// Download parts concurrently
	go downloadScheduler(jobs, parts)

	// Wait for the part downloads to finish
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			downBytes := part.End - part.Start + 1
			completedBytes += downBytes
			parts[part.Index].CRC64 = part.CRC64
			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, downBytes)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
			publishProgress(listener, event)
			return err
		}

		if completed >= len(parts) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
	publishProgress(listener, event)

	if enableCRC {
		actualCRC := combineCRCInParts(parts)
		err = CheckDownloadCRC(actualCRC, expectedCRC)
		if err != nil {
			return err
		}
	}

	return os.Rename(tempFilePath, filePath)
}

// ----- Concurrent download with checkpoint -----

const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"

type downloadCheckpoint struct {
	Magic     string         // Magic
	MD5       string         // Checkpoint content MD5
	FilePath  string         // Local file
	Object    string         // Key
	ObjStat   objectStat     // Object status
	Parts     []downloadPart // All download parts
	PartStat  []bool         // Parts' download status
	Start     int64          // Start point of the file
	End       int64          // End point of the file
	enableCRC bool           // Whether the CRC check is enabled
	CRC       uint64         // CRC check value
}

type objectStat struct {
	Size         int64  // Object size
	LastModified string // Last modified time
	Etag         string // Etag
}

// isValid reports whether the checkpoint data is valid: the checkpoint itself
// is intact and the remote object has not been updated since it was written.
func (cp downloadCheckpoint) isValid(meta http.Header, uRange *UnpackedRange) (bool, error) {
	// Compare the CP's Magic and the MD5
	cpb := cp
	cpb.MD5 = ""
	js, _ := json.Marshal(cpb)
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])

	if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
		return false, nil
	}

	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
	if err != nil {
		return false, err
	}

	// Compare the object size, last modified time and etag
	if cp.ObjStat.Size != objectSize ||
		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
		return false, nil
	}

	// Check the download range
	if uRange != nil {
		start, end := AdjustRange(uRange, objectSize)
		if start != cp.Start || end != cp.End {
			return false, nil
		}
	}

	return true, nil
}

// load loads the checkpoint from a local file.
func (cp *downloadCheckpoint) load(filePath string) error {
	contents, err := ioutil.ReadFile(filePath)
	if err != nil {
		return err
	}

	err = json.Unmarshal(contents, cp)
	return err
}

// dump dumps the checkpoint to a file.
func (cp *downloadCheckpoint) dump(filePath string) error {
	bcp := *cp

	// Calculate MD5
	bcp.MD5 = ""
	js, err := json.Marshal(bcp)
	if err != nil {
		return err
	}
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])
	bcp.MD5 = b64

	// Serialize
	js, err = json.Marshal(bcp)
	if err != nil {
		return err
	}

	// Dump
	return ioutil.WriteFile(filePath, js, FilePermMode)
}
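
Note the self-verifying layout: dump marshals the checkpoint with its MD5 field blanked, hashes that JSON, stores the base64 digest back into the struct, and marshals again; isValid reverses the dance. The same trick in isolation (a minimal sketch using the same imports as this file, not SDK API):

	type sealed struct {
		MD5  string
		Data string
	}

	func seal(s sealed) (sealed, error) {
		// Hash the JSON with the digest field empty, then store the digest.
		s.MD5 = ""
		js, err := json.Marshal(s)
		if err != nil {
			return s, err
		}
		sum := md5.Sum(js)
		s.MD5 = base64.StdEncoding.EncodeToString(sum[:])
		return s, nil
	}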

// todoParts returns the parts that have not finished downloading.
func (cp downloadCheckpoint) todoParts() []downloadPart {
	dps := []downloadPart{}
	for i, ps := range cp.PartStat {
		if !ps {
			dps = append(dps, cp.Parts[i])
		}
	}
	return dps
}

// getCompletedBytes gets the completed size.
func (cp downloadCheckpoint) getCompletedBytes() int64 {
	var completedBytes int64
	for i, part := range cp.Parts {
		if cp.PartStat[i] {
			completedBytes += part.End - part.Start + 1
		}
	}
	return completedBytes
}

// prepare initializes the download tasks.
func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *UnpackedRange) error {
	// CP
	cp.Magic = downloadCpMagic
	cp.FilePath = filePath
	cp.Object = objectKey

	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
	if err != nil {
		return err
	}

	cp.ObjStat.Size = objectSize
	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)

	if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
		if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
			cp.enableCRC = true
			cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
		}
	}

	// Parts
	cp.Parts = getDownloadParts(objectSize, partSize, uRange)
	cp.PartStat = make([]bool, len(cp.Parts))
	for i := range cp.PartStat {
		cp.PartStat[i] = false
	}

	return nil
}

// complete moves the finished temp file into place and removes the checkpoint file.
func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
	err := os.Rename(downFilepath, cp.FilePath)
	if err != nil {
		return err
	}
	return os.Remove(cpFilePath)
}

// downloadFileWithCp downloads a file with checkpoint.
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *UnpackedRange) error {
	tempFilePath := filePath + TempFileSuffix
	listener := GetProgressListener(options)

	// Load checkpoint data.
	dcp := downloadCheckpoint{}
	err := dcp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}

	// Get the object's detailed meta for the whole object size.
	// The Range header must be deleted to get the whole object size.
	skipOptions := DeleteOption(options, HTTPHeaderRange)
	meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
	if err != nil {
		return err
	}

	// Load error or invalid data: re-initialize the download.
	valid, err := dcp.isValid(meta, uRange)
	if err != nil || !valid {
		if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}

	// Create the file if it does not exist. Otherwise the part downloads will overwrite it.
	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
	if err != nil {
		return err
	}
	fd.Close()

	// Unfinished parts
	parts := dcp.todoParts()
	jobs := make(chan downloadPart, len(parts))
	results := make(chan downloadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	completedBytes := dcp.getCompletedBytes()
	event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size, 0)
	publishProgress(listener, event)

	// Start the download worker goroutines
	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC}
	for w := 1; w <= routines; w++ {
		go downloadWorker(w, arg, jobs, results, failed, die)
	}

	// Concurrently download the parts
	go downloadScheduler(jobs, parts)

	// Wait for the part downloads to finish
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			dcp.PartStat[part.Index] = true
			dcp.Parts[part.Index].CRC64 = part.CRC64
			dcp.dump(cpFilePath)
			downBytes := part.End - part.Start + 1
			completedBytes += downBytes
			event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size, downBytes)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size, 0)
			publishProgress(listener, event)
			return err
		}

		if completed >= len(parts) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size, 0)
	publishProgress(listener, event)

	if dcp.enableCRC {
		actualCRC := combineCRCInParts(dcp.Parts)
		err = CheckDownloadCRC(actualCRC, dcp.CRC)
		if err != nil {
			return err
		}
	}

	return dcp.complete(cpFilePath, tempFilePath)
}
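
What lands on disk at cpFilePath after each part is just the JSON of the exported checkpoint fields. A hypothetical dump midway through a two-part download (values invented for illustration; note the unexported enableCRC field never round-trips):

	// {
	//   "Magic": "92611BED-89E2-46B6-89E5-72F273D4B0A3",
	//   "MD5": "2f1e...base64...==",
	//   "FilePath": "/tmp/big-object",
	//   "Object": "chunks/big-object",
	//   "ObjStat": {"Size": 10485760, "LastModified": "Mon, 06 Feb 2023 08:00:00 GMT", "Etag": "\"ABCD\""},
	//   "Parts": [{"Index": 0, "Start": 0, "End": 5242879, "Offset": 0, "CRC64": 123456789},
	//             {"Index": 1, "Start": 5242880, "End": 10485759, "Offset": 0, "CRC64": 0}],
	//   "PartStat": [true, false],
	//   "Start": 0, "End": 0, "CRC": 9876543210
	// }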

@ -0,0 +1,94 @@
package oss

import (
	"encoding/xml"
	"fmt"
	"net/http"
	"strings"
)

// ServiceError contains the fields of an error response from the OSS Service REST API.
type ServiceError struct {
	XMLName    xml.Name `xml:"Error"`
	Code       string   `xml:"Code"`      // The error code returned from OSS to the caller
	Message    string   `xml:"Message"`   // The detailed error message from OSS
	RequestID  string   `xml:"RequestId"` // The UUID used to uniquely identify the request
	HostID     string   `xml:"HostId"`    // The OSS server cluster's Id
	Endpoint   string   `xml:"Endpoint"`
	RawMessage string   // The raw message from OSS
	StatusCode int      // HTTP status code
}

// Error implements the error interface.
func (e ServiceError) Error() string {
	if e.Endpoint == "" {
		return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s",
			e.StatusCode, e.Code, e.Message, e.RequestID)
	}
	return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s, Endpoint=%s",
		e.StatusCode, e.Code, e.Message, e.RequestID, e.Endpoint)
}

// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
	allowed []int // The expected HTTP status codes returned from OSS
	got     int   // The actual HTTP status code from OSS
}

// Error implements the error interface.
func (e UnexpectedStatusCodeError) Error() string {
	s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }

	got := s(e.got)
	expected := []string{}
	for _, v := range e.allowed {
		expected = append(expected, s(v))
	}
	return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
		got, strings.Join(expected, " or "))
}

// Got is the actual status code returned by OSS.
func (e UnexpectedStatusCodeError) Got() int {
	return e.got
}

// CheckRespCode returns UnexpectedStatusCodeError if the given response code is not
// one of the allowed status codes; otherwise nil.
func CheckRespCode(respCode int, allowed []int) error {
	for _, v := range allowed {
		if respCode == v {
			return nil
		}
	}
	return UnexpectedStatusCodeError{allowed, respCode}
}

// CRCCheckError is returned when the CRC check is inconsistent between client and server.
type CRCCheckError struct {
	clientCRC uint64 // CRC64 calculated on the client
	serverCRC uint64 // CRC64 calculated on the server
	operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc.
	requestID string // The request id of this operation
}

// Error implements the error interface.
func (e CRCCheckError) Error() string {
	return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
		e.operation, e.clientCRC, e.serverCRC, e.requestID)
}

// CheckDownloadCRC checks the client-side CRC against the server-side CRC after a download.
func CheckDownloadCRC(clientCRC, serverCRC uint64) error {
	if clientCRC == serverCRC {
		return nil
	}
	return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""}
}

// CheckCRC checks a response's CRC when the server returned one.
func CheckCRC(resp *Response, operation string) error {
	if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC {
		return nil
	}
	return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)}
}
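
From the caller's side, ServiceError is what makes the OSS error codes actionable. A sketch from a caller package ("NoSuchKey" is the code OSS returns for a missing object):

	body, err := bucket.GetObject("missing-key")
	if err != nil {
		if serr, ok := err.(oss.ServiceError); ok && serr.Code == "NoSuchKey" {
			// The object does not exist; many callers treat this
			// differently from a transport or server failure.
		}
		return err
	}
	defer body.Close()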

@ -0,0 +1,28 @@
// +build !go1.7

// "golang.org/x/time/rate" depends on the context package that the standard
// library gained in go1.7, so this file exists only so the package still
// builds on older Go; it does not support limiting the upload speed.
package oss

import (
	"fmt"
	"io"
)

const (
	perTokenBandwidthSize int = 1024
)

type OssLimiter struct {
}

type LimitSpeedReader struct {
	io.ReadCloser
	reader     io.Reader
	ossLimiter *OssLimiter
}

// GetOssLimiter always fails below go1.7.
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
	err = fmt.Errorf("rate.Limiter is not supported below version go1.7")
	return nil, err
}

@ -0,0 +1,90 @@
// +build go1.7

package oss

import (
	"fmt"
	"io"
	"math"
	"time"

	"golang.org/x/time/rate"
)

const (
	perTokenBandwidthSize int = 1024
)

// OssLimiter wraps rate.Limiter.
type OssLimiter struct {
	limiter *rate.Limiter
}

// GetOssLimiter creates an OssLimiter.
// uploadSpeed is the limit in KB/s; one token corresponds to 1KB.
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
	limiter := rate.NewLimiter(rate.Limit(uploadSpeed), uploadSpeed)

	// Consume the initial full burst of tokens first, so the limiter
	// behaves more accurately from the start.
	limiter.AllowN(time.Now(), uploadSpeed)

	return &OssLimiter{
		limiter: limiter,
	}, nil
}

// LimitSpeedReader wraps a reader to limit the upload bandwidth.
type LimitSpeedReader struct {
	io.ReadCloser
	reader     io.Reader
	ossLimiter *OssLimiter
}

// Read reads up to len(p) bytes in burst-sized chunks, reserving one token
// per KB read and sleeping for whatever delay the limiter asks for.
func (r *LimitSpeedReader) Read(p []byte) (n int, err error) {
	n = 0
	err = nil
	start := 0
	burst := r.ossLimiter.limiter.Burst()
	var end int
	var tmpN int
	var tc int
	for start < len(p) {
		// Read at most one burst's worth of bytes per iteration.
		if start+burst*perTokenBandwidthSize < len(p) {
			end = start + burst*perTokenBandwidthSize
		} else {
			end = len(p)
		}

		tmpN, err = r.reader.Read(p[start:end])
		if tmpN > 0 {
			n += tmpN
			start = n
		}

		if err != nil {
			return
		}

		tc = int(math.Ceil(float64(tmpN) / float64(perTokenBandwidthSize)))
		now := time.Now()
		re := r.ossLimiter.limiter.ReserveN(now, tc)
		if !re.OK() {
			err = fmt.Errorf("LimitSpeedReader.Read() failure, ReserveN error, start:%d, end:%d, burst:%d, perTokenBandwidthSize:%d",
				start, end, burst, perTokenBandwidthSize)
			return
		}
		timeDelay := re.Delay()
		time.Sleep(timeDelay)
	}
	return
}

// Close closes the underlying reader when it implements io.ReadCloser.
func (r *LimitSpeedReader) Close() error {
	rc, ok := r.reader.(io.ReadCloser)
	if ok {
		return rc.Close()
	}
	return nil
}
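
Back-of-envelope on the pacing: with a limit of 100 (KB/s), a 256 KB Read reserves 256 one-KB tokens across its chunked loop, so at 100 tokens per second it spreads the read over roughly 2.5 s once the initial burst is spent. Wiring it up usually goes through the client; upstream releases expose a LimitUploadSpeed helper on Client, though the exact name should be treated as an assumption here:

	client, err := oss.New(endpoint, accessKeyID, secretAccessKey)
	if err != nil {
		return err
	}
	// Cap uploads at roughly 100 KB/s; reads that flow through
	// LimitSpeedReader are then paced by the shared OssLimiter.
	if err = client.LimitUploadSpeed(100); err != nil {
		return err
	}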

@ -0,0 +1,257 @@
package oss

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"time"
)

// CreateLiveChannel creates a live-channel.
//
// channelName  the name of the channel
// config       the configuration of the channel
//
// CreateLiveChannelResult  the result of creating the live-channel
// error                    nil if success, otherwise error
//
func (bucket Bucket) CreateLiveChannel(channelName string, config LiveChannelConfiguration) (CreateLiveChannelResult, error) {
	var out CreateLiveChannelResult

	bs, err := xml.Marshal(config)
	if err != nil {
		return out, err
	}

	buffer := new(bytes.Buffer)
	buffer.Write(bs)

	params := map[string]interface{}{}
	params["live"] = nil
	resp, err := bucket.do("PUT", channelName, params, nil, buffer, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	return out, err
}

// PutLiveChannelStatus sets the status of the live-channel: enabled/disabled.
//
// channelName  the name of the channel
// status       enabled/disabled
//
// error  nil if success, otherwise error
//
func (bucket Bucket) PutLiveChannelStatus(channelName, status string) error {
	params := map[string]interface{}{}
	params["live"] = nil
	params["status"] = status

	resp, err := bucket.do("PUT", channelName, params, nil, nil, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}

// PostVodPlaylist creates a playlist based on the specified playlist name, startTime and endTime.
//
// channelName   the name of the channel
// playlistName  the name of the playlist, must end with ".m3u8"
// startTime     the start time of the playlist
// endTime       the end time of the playlist
//
// error  nil if success, otherwise error
//
func (bucket Bucket) PostVodPlaylist(channelName, playlistName string, startTime, endTime time.Time) error {
	params := map[string]interface{}{}
	params["vod"] = nil
	params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
	params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)

	key := fmt.Sprintf("%s/%s", channelName, playlistName)
	resp, err := bucket.do("POST", key, params, nil, nil, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}

// GetVodPlaylist gets the playlist based on the specified channelName, startTime and endTime.
//
// channelName  the name of the channel
// startTime    the start time of the playlist
// endTime      the end time of the playlist
//
// io.ReadCloser  the reader instance for reading data from the response. It must be closed after use, and is only valid when error is nil.
// error          nil if success, otherwise error
//
func (bucket Bucket) GetVodPlaylist(channelName string, startTime, endTime time.Time) (io.ReadCloser, error) {
	params := map[string]interface{}{}
	params["vod"] = nil
	params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
	params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)

	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
	if err != nil {
		return nil, err
	}

	return resp.Body, nil
}

// GetLiveChannelStat gets the state of the live-channel.
//
// channelName  the name of the channel
//
// LiveChannelStat  the state of the live-channel
// error            nil if success, otherwise error
//
func (bucket Bucket) GetLiveChannelStat(channelName string) (LiveChannelStat, error) {
	var out LiveChannelStat
	params := map[string]interface{}{}
	params["live"] = nil
	params["comp"] = "stat"

	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	return out, err
}

// GetLiveChannelInfo gets the configuration info of the live-channel.
//
// channelName  the name of the channel
//
// LiveChannelConfiguration  the configuration info of the live-channel
// error                     nil if success, otherwise error
//
func (bucket Bucket) GetLiveChannelInfo(channelName string) (LiveChannelConfiguration, error) {
	var out LiveChannelConfiguration
	params := map[string]interface{}{}
	params["live"] = nil

	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	return out, err
}

// GetLiveChannelHistory gets the push records of the live-channel.
//
// channelName  the name of the channel
//
// LiveChannelHistory  the push records
// error               nil if success, otherwise error
//
func (bucket Bucket) GetLiveChannelHistory(channelName string) (LiveChannelHistory, error) {
	var out LiveChannelHistory
	params := map[string]interface{}{}
	params["live"] = nil
	params["comp"] = "history"

	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	return out, err
}

// ListLiveChannel lists the live-channels.
//
// options  Prefix: filter by names starting with the value of "Prefix"
//          MaxKeys: the maximum count returned
//          Marker: the cursor from which to start listing
//
// ListLiveChannelResult  the live-channel list
// error                  nil if success, otherwise error
//
func (bucket Bucket) ListLiveChannel(options ...Option) (ListLiveChannelResult, error) {
	var out ListLiveChannelResult

	params, err := GetRawParams(options)
	if err != nil {
		return out, err
	}

	params["live"] = nil

	resp, err := bucket.do("GET", "", params, nil, nil, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	return out, err
}

// DeleteLiveChannel deletes the live-channel. The operation fails while a client is
// still streaming to the live-channel. Only the live-channel itself is deleted; the
// objects generated by the live-channel are left in place.
//
// channelName  the name of the channel
//
// error  nil if success, otherwise error
//
func (bucket Bucket) DeleteLiveChannel(channelName string) error {
	params := map[string]interface{}{}
	params["live"] = nil

	if channelName == "" {
		return fmt.Errorf("invalid argument: channel name is empty")
	}

	resp, err := bucket.do("DELETE", channelName, params, nil, nil, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}

// SignRtmpURL generates a signed RTMP push-stream URL so that a trusted user can push
// an RTMP stream to the live-channel.
//
// channelName   the name of the channel
// playlistName  the name of the playlist, must end with ".m3u8"
// expires       the expiration in seconds
//
// string  the signed RTMP push-stream URL
// error   nil if success, otherwise error
//
func (bucket Bucket) SignRtmpURL(channelName, playlistName string, expires int64) (string, error) {
	if expires <= 0 {
		return "", fmt.Errorf("invalid argument: %d, expires must be greater than 0", expires)
	}
	expiration := time.Now().Unix() + expires

	return bucket.Client.Conn.signRtmpURL(bucket.BucketName, channelName, playlistName, expiration), nil
}
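
End-to-end, the live-channel flow is: create the channel, hand the signed RTMP URL to the publisher, and later cut VOD playlists from the recorded fragments. A sketch (the LiveChannelConfiguration/LiveChannelTarget field names follow the upstream type definitions and should be treated as assumptions here):

	config := oss.LiveChannelConfiguration{
		Description: "demo channel",
		Status:      "enabled",
		Target: oss.LiveChannelTarget{
			Type:         "HLS", // HLS is the supported target type
			PlaylistName: "playlist.m3u8",
		},
	}
	if _, err := bucket.CreateLiveChannel("demo-channel", config); err != nil {
		return err
	}
	// The publisher pushes to this signed URL; the signature lasts an hour.
	pushURL, err := bucket.SignRtmpURL("demo-channel", "playlist.m3u8", 3600)
	if err != nil {
		return err
	}
	fmt.Println("publish to:", pushURL)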

@ -0,0 +1,572 @@
package oss

import (
	"mime"
	"path"
	"strings"
)

var extToMimeType = map[string]string{
	".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
	".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
	".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
	".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
	".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
	".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
	".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
	".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
	".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
	".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
	".apk": "application/vnd.android.package-archive",
	".hqx": "application/mac-binhex40",
	".cpt": "application/mac-compactpro",
	".doc": "application/msword",
	".ogg": "application/ogg",
	".pdf": "application/pdf",
	".rtf": "text/rtf",
	".mif": "application/vnd.mif",
	".xls": "application/vnd.ms-excel",
	".ppt": "application/vnd.ms-powerpoint",
	".odc": "application/vnd.oasis.opendocument.chart",
	".odb": "application/vnd.oasis.opendocument.database",
	".odf": "application/vnd.oasis.opendocument.formula",
	".odg": "application/vnd.oasis.opendocument.graphics",
	".otg": "application/vnd.oasis.opendocument.graphics-template",
	".odi": "application/vnd.oasis.opendocument.image",
	".odp": "application/vnd.oasis.opendocument.presentation",
	".otp": "application/vnd.oasis.opendocument.presentation-template",
	".ods": "application/vnd.oasis.opendocument.spreadsheet",
	".ots": "application/vnd.oasis.opendocument.spreadsheet-template",
	".odt": "application/vnd.oasis.opendocument.text",
	".odm": "application/vnd.oasis.opendocument.text-master",
	".ott": "application/vnd.oasis.opendocument.text-template",
	".oth": "application/vnd.oasis.opendocument.text-web",
	".sxw": "application/vnd.sun.xml.writer",
	".stw": "application/vnd.sun.xml.writer.template",
	".sxc": "application/vnd.sun.xml.calc",
	".stc": "application/vnd.sun.xml.calc.template",
	".sxd": "application/vnd.sun.xml.draw",
	".std": "application/vnd.sun.xml.draw.template",
	".sxi": "application/vnd.sun.xml.impress",
	".sti": "application/vnd.sun.xml.impress.template",
	".sxg": "application/vnd.sun.xml.writer.global",
	".sxm": "application/vnd.sun.xml.math",
	".sis": "application/vnd.symbian.install",
	".wbxml": "application/vnd.wap.wbxml",
	".wmlc": "application/vnd.wap.wmlc",
	".wmlsc": "application/vnd.wap.wmlscriptc",
	".bcpio": "application/x-bcpio",
	".torrent": "application/x-bittorrent",
	".bz2": "application/x-bzip2",
	".vcd": "application/x-cdlink",
	".pgn": "application/x-chess-pgn",
	".cpio": "application/x-cpio",
	".csh": "application/x-csh",
	".dvi": "application/x-dvi",
	".spl": "application/x-futuresplash",
	".gtar": "application/x-gtar",
	".hdf": "application/x-hdf",
	".jar": "application/x-java-archive",
	".jnlp": "application/x-java-jnlp-file",
	".js": "application/x-javascript",
	".ksp": "application/x-kspread",
	".chrt": "application/x-kchart",
	".kil": "application/x-killustrator",
	".latex": "application/x-latex",
	".rpm": "application/x-rpm",
	".sh": "application/x-sh",
	".shar": "application/x-shar",
	".swf": "application/x-shockwave-flash",
	".sit": "application/x-stuffit",
	".sv4cpio": "application/x-sv4cpio",
	".sv4crc": "application/x-sv4crc",
	".tar": "application/x-tar",
	".tcl": "application/x-tcl",
	".tex": "application/x-tex",
	".man": "application/x-troff-man",
	".me": "application/x-troff-me",
	".ms": "application/x-troff-ms",
	".ustar": "application/x-ustar",
	".src": "application/x-wais-source",
	".zip": "application/zip",
	".m3u": "audio/x-mpegurl",
	".ra": "audio/x-pn-realaudio",
	".wav": "audio/x-wav",
	".wma": "audio/x-ms-wma",
	".wax": "audio/x-ms-wax",
	".pdb": "chemical/x-pdb",
	".xyz": "chemical/x-xyz",
	".bmp": "image/bmp",
	".gif": "image/gif",
	".ief": "image/ief",
	".png": "image/png",
	".wbmp": "image/vnd.wap.wbmp",
	".ras": "image/x-cmu-raster",
	".pnm": "image/x-portable-anymap",
	".pbm": "image/x-portable-bitmap",
	".pgm": "image/x-portable-graymap",
	".ppm": "image/x-portable-pixmap",
	".rgb": "image/x-rgb",
	".xbm": "image/x-xbitmap",
	".xpm": "image/x-xpixmap",
	".xwd": "image/x-xwindowdump",
	".css": "text/css",
	".rtx": "text/richtext",
	".tsv": "text/tab-separated-values",
	".jad": "text/vnd.sun.j2me.app-descriptor",
	".wml": "text/vnd.wap.wml",
	".wmls": "text/vnd.wap.wmlscript",
	".etx": "text/x-setext",
	".mxu": "video/vnd.mpegurl",
	".flv": "video/x-flv",
	".wm": "video/x-ms-wm",
	".wmv": "video/x-ms-wmv",
	".wmx": "video/x-ms-wmx",
	".wvx": "video/x-ms-wvx",
	".avi": "video/x-msvideo",
	".movie": "video/x-sgi-movie",
	".ice": "x-conference/x-cooltalk",
	".3gp": "video/3gpp",
	".ai": "application/postscript",
	".aif": "audio/x-aiff",
	".aifc": "audio/x-aiff",
	".aiff": "audio/x-aiff",
	".asc": "text/plain",
	".atom": "application/atom+xml",
	".au": "audio/basic",
	".bin": "application/octet-stream",
	".cdf": "application/x-netcdf",
	".cgm": "image/cgm",
	".class": "application/octet-stream",
	".dcr": "application/x-director",
	".dif": "video/x-dv",
	".dir": "application/x-director",
	".djv": "image/vnd.djvu",
	".djvu": "image/vnd.djvu",
	".dll": "application/octet-stream",
	".dmg": "application/octet-stream",
	".dms": "application/octet-stream",
	".dtd": "application/xml-dtd",
	".dv": "video/x-dv",
	".dxr": "application/x-director",
	".eps": "application/postscript",
	".exe": "application/octet-stream",
	".ez": "application/andrew-inset",
	".gram": "application/srgs",
	".grxml": "application/srgs+xml",
	".gz": "application/x-gzip",
	".htm": "text/html",
	".html": "text/html",
	".ico": "image/x-icon",
	".ics": "text/calendar",
	".ifb": "text/calendar",
	".iges": "model/iges",
	".igs": "model/iges",
	".jp2": "image/jp2",
	".jpe": "image/jpeg",
	".jpeg": "image/jpeg",
	".jpg": "image/jpeg",
	".kar": "audio/midi",
	".lha": "application/octet-stream",
	".lzh": "application/octet-stream",
	".m4a": "audio/mp4a-latm",
	".m4p": "audio/mp4a-latm",
	".m4u": "video/vnd.mpegurl",
	".m4v": "video/x-m4v",
	".mac": "image/x-macpaint",
	".mathml": "application/mathml+xml",
	".mesh": "model/mesh",
	".mid": "audio/midi",
	".midi": "audio/midi",
	".mov": "video/quicktime",
	".mp2": "audio/mpeg",
	".mp3": "audio/mpeg",
	".mp4": "video/mp4",
	".mpe": "video/mpeg",
	".mpeg": "video/mpeg",
	".mpg": "video/mpeg",
	".mpga": "audio/mpeg",
	".msh": "model/mesh",
	".nc": "application/x-netcdf",
	".oda": "application/oda",
	".ogv": "video/ogv",
	".pct": "image/pict",
	".pic": "image/pict",
	".pict": "image/pict",
	".pnt": "image/x-macpaint",
	".pntg": "image/x-macpaint",
	".ps": "application/postscript",
	".qt": "video/quicktime",
	".qti": "image/x-quicktime",
	".qtif": "image/x-quicktime",
	".ram": "audio/x-pn-realaudio",
	".rdf": "application/rdf+xml",
	".rm": "application/vnd.rn-realmedia",
	".roff": "application/x-troff",
	".sgm": "text/sgml",
	".sgml": "text/sgml",
	".silo": "model/mesh",
	".skd": "application/x-koan",
	".skm": "application/x-koan",
	".skp": "application/x-koan",
	".skt": "application/x-koan",
	".smi": "application/smil",
	".smil": "application/smil",
	".snd": "audio/basic",
	".so": "application/octet-stream",
	".svg": "image/svg+xml",
	".t": "application/x-troff",
	".texi": "application/x-texinfo",
	".texinfo": "application/x-texinfo",
	".tif": "image/tiff",
	".tiff": "image/tiff",
	".tr": "application/x-troff",
	".txt": "text/plain",
	".vrml": "model/vrml",
	".vxml": "application/voicexml+xml",
	".webm": "video/webm",
	".wrl": "model/vrml",
	".xht": "application/xhtml+xml",
	".xhtml": "application/xhtml+xml",
	".xml": "application/xml",
	".xsl": "application/xml",
	".xslt": "application/xslt+xml",
	".xul": "application/vnd.mozilla.xul+xml",
	".webp": "image/webp",
	".323": "text/h323",
	".aab": "application/x-authoware-bin",
	".aam": "application/x-authoware-map",
	".aas": "application/x-authoware-seg",
	".acx": "application/internet-property-stream",
	".als": "audio/X-Alpha5",
	".amc": "application/x-mpeg",
	".ani": "application/octet-stream",
	".asd": "application/astound",
	".asf": "video/x-ms-asf",
	".asn": "application/astound",
	".asp": "application/x-asap",
	".asr": "video/x-ms-asf",
	".asx": "video/x-ms-asf",
	".avb": "application/octet-stream",
	".awb": "audio/amr-wb",
	".axs": "application/olescript",
	".bas": "text/plain",
	".bld": "application/bld",
	".bld2": "application/bld2",
	".bpk": "application/octet-stream",
	".c": "text/plain",
	".cal": "image/x-cals",
	".cat": "application/vnd.ms-pkiseccat",
	".ccn": "application/x-cnc",
	".cco": "application/x-cocoa",
	".cer": "application/x-x509-ca-cert",
	".cgi": "magnus-internal/cgi",
	".chat": "application/x-chat",
	".clp": "application/x-msclip",
	".cmx": "image/x-cmx",
	".co": "application/x-cult3d-object",
	".cod": "image/cis-cod",
	".conf": "text/plain",
	".cpp": "text/plain",
	".crd": "application/x-mscardfile",
	".crl": "application/pkix-crl",
	".crt": "application/x-x509-ca-cert",
	".csm": "chemical/x-csml",
	".csml": "chemical/x-csml",
	".cur": "application/octet-stream",
	".dcm": "x-lml/x-evm",
	".dcx": "image/x-dcx",
	".der": "application/x-x509-ca-cert",
	".dhtml": "text/html",
	".dot": "application/msword",
	".dwf": "drawing/x-dwf",
	".dwg": "application/x-autocad",
	".dxf": "application/x-autocad",
	".ebk": "application/x-expandedbook",
	".emb": "chemical/x-embl-dl-nucleotide",
	".embl": "chemical/x-embl-dl-nucleotide",
	".epub": "application/epub+zip",
	".eri": "image/x-eri",
	".es": "audio/echospeech",
	".esl": "audio/echospeech",
	".etc": "application/x-earthtime",
	".evm": "x-lml/x-evm",
	".evy": "application/envoy",
	".fh4": "image/x-freehand",
	".fh5": "image/x-freehand",
	".fhc": "image/x-freehand",
	".fif": "application/fractals",
	".flr": "x-world/x-vrml",
	".fm": "application/x-maker",
	".fpx": "image/x-fpx",
	".fvi": "video/isivideo",
	".gau": "chemical/x-gaussian-input",
	".gca": "application/x-gca-compressed",
	".gdb": "x-lml/x-gdb",
	".gps": "application/x-gps",
	".h": "text/plain",
	".hdm": "text/x-hdml",
	".hdml": "text/x-hdml",
	".hlp": "application/winhlp",
	".hta": "application/hta",
	".htc": "text/x-component",
	".hts": "text/html",
	".htt": "text/webviewhtml",
	".ifm": "image/gif",
	".ifs": "image/ifs",
	".iii": "application/x-iphone",
	".imy": "audio/melody",
	".ins": "application/x-internet-signup",
	".ips": "application/x-ipscript",
	".ipx": "application/x-ipix",
	".isp": "application/x-internet-signup",
	".it": "audio/x-mod",
	".itz": "audio/x-mod",
	".ivr": "i-world/i-vrml",
	".j2k": "image/j2k",
	".jam": "application/x-jam",
	".java": "text/plain",
	".jfif": "image/pipeg",
	".jpz": "image/jpeg",
	".jwc": "application/jwc",
	".kjx": "application/x-kjx",
	".lak": "x-lml/x-lak",
	".lcc": "application/fastman",
	".lcl": "application/x-digitalloca",
	".lcr": "application/x-digitalloca",
	".lgh": "application/lgh",
	".lml": "x-lml/x-lml",
	".lmlpack": "x-lml/x-lmlpack",
	".log": "text/plain",
	".lsf": "video/x-la-asf",
	".lsx": "video/x-la-asf",
	".m13": "application/x-msmediaview",
	".m14": "application/x-msmediaview",
	".m15": "audio/x-mod",
	".m3url": "audio/x-mpegurl",
	".m4b": "audio/mp4a-latm",
	".ma1": "audio/ma1",
	".ma2": "audio/ma2",
	".ma3": "audio/ma3",
	".ma5": "audio/ma5",
	".map": "magnus-internal/imagemap",
	".mbd": "application/mbedlet",
	".mct": "application/x-mascot",
	".mdb": "application/x-msaccess",
	".mdz": "audio/x-mod",
	".mel": "text/x-vmel",
	".mht": "message/rfc822",
	".mhtml": "message/rfc822",
	".mi": "application/x-mif",
	".mil": "image/x-cals",
	".mio": "audio/x-mio",
	".mmf": "application/x-skt-lbs",
	".mng": "video/x-mng",
	".mny": "application/x-msmoney",
	".moc": "application/x-mocha",
	".mocha": "application/x-mocha",
	".mod": "audio/x-mod",
	".mof": "application/x-yumekara",
	".mol": "chemical/x-mdl-molfile",
	".mop": "chemical/x-mopac-input",
	".mpa": "video/mpeg",
	".mpc": "application/vnd.mpohun.certificate",
	".mpg4": "video/mp4",
	".mpn": "application/vnd.mophun.application",
	".mpp": "application/vnd.ms-project",
	".mps": "application/x-mapserver",
	".mpv2": "video/mpeg",
	".mrl": "text/x-mrml",
	".mrm": "application/x-mrm",
	".msg": "application/vnd.ms-outlook",
	".mts": "application/metastream",
	".mtx": "application/metastream",
	".mtz": "application/metastream",
	".mvb": "application/x-msmediaview",
	".mzv": "application/metastream",
	".nar": "application/zip",
	".nbmp": "image/nbmp",
	".ndb": "x-lml/x-ndb",
	".ndwn": "application/ndwn",
	".nif": "application/x-nif",
	".nmz": "application/x-scream",
	".nokia-op-logo": "image/vnd.nok-oplogo-color",
	".npx": "application/x-netfpx",
	".nsnd": "audio/nsnd",
	".nva": "application/x-neva1",
	".nws": "message/rfc822",
	".oom": "application/x-AtlasMate-Plugin",
	".p10": "application/pkcs10",
	".p12": "application/x-pkcs12",
	".p7b": "application/x-pkcs7-certificates",
	".p7c": "application/x-pkcs7-mime",
	".p7m": "application/x-pkcs7-mime",
	".p7r": "application/x-pkcs7-certreqresp",
	".p7s": "application/x-pkcs7-signature",
	".pac": "audio/x-pac",
	".pae": "audio/x-epac",
	".pan": "application/x-pan",
	".pcx": "image/x-pcx",
	".pda": "image/x-pda",
	".pfr": "application/font-tdpfr",
	".pfx": "application/x-pkcs12",
	".pko": "application/ynd.ms-pkipko",
	".pm": "application/x-perl",
	".pma": "application/x-perfmon",
	".pmc": "application/x-perfmon",
	".pmd": "application/x-pmd",
	".pml": "application/x-perfmon",
	".pmr": "application/x-perfmon",
	".pmw": "application/x-perfmon",
	".pnz": "image/png",
	".pot": "application/vnd.ms-powerpoint",
	".pps": "application/vnd.ms-powerpoint",
	".pqf": "application/x-cprplayer",
	".pqi": "application/cprplayer",
	".prc": "application/x-prc",
	".prf": "application/pics-rules",
	".prop": "text/plain",
	".proxy": "application/x-ns-proxy-autoconfig",
	".ptlk": "application/listenup",
	".pub": "application/x-mspublisher",
	".pvx": "video/x-pv-pvx",
	".qcp": "audio/vnd.qcelp",
	".r3t": "text/vnd.rn-realtext3d",
	".rar": "application/octet-stream",
	".rc": "text/plain",
	".rf": "image/vnd.rn-realflash",
	".rlf": "application/x-richlink",
	".rmf": "audio/x-rmf",
	".rmi": "audio/mid",
	".rmm": "audio/x-pn-realaudio",
	".rmvb": "audio/x-pn-realaudio",
	".rnx": "application/vnd.rn-realplayer",
	".rp": "image/vnd.rn-realpix",
	".rt": "text/vnd.rn-realtext",
	".rte": "x-lml/x-gps",
	".rtg": "application/metastream",
	".rv": "video/vnd.rn-realvideo",
	".rwc": "application/x-rogerwilco",
	".s3m": "audio/x-mod",
	".s3z": "audio/x-mod",
	".sca": "application/x-supercard",
	".scd": "application/x-msschedule",
	".sct": "text/scriptlet",
	".sdf": "application/e-score",
	".sea": "application/x-stuffit",
	".setpay": "application/set-payment-initiation",
	".setreg": "application/set-registration-initiation",
	".shtml": "text/html",
	".shtm": "text/html",
	".shw": "application/presentations",
	".si6": "image/si6",
	".si7": "image/vnd.stiwap.sis",
	".si9": "image/vnd.lgtwap.sis",
	".slc": "application/x-salsa",
	".smd": "audio/x-smd",
	".smp": "application/studiom",
	".smz": "audio/x-smd",
	".spc": "application/x-pkcs7-certificates",
	".spr": "application/x-sprite",
	".sprite": "application/x-sprite",
	".sdp": "application/sdp",
	".spt": "application/x-spt",
	".sst": "application/vnd.ms-pkicertstore",
	".stk": "application/hyperstudio",
	".stl": "application/vnd.ms-pkistl",
	".stm": "text/html",
	".svf": "image/vnd",
	".svh": "image/svh",
	".svr": "x-world/x-svr",
	".swfl": "application/x-shockwave-flash",
	".tad": "application/octet-stream",
	".talk": "text/x-speech",
	".taz": "application/x-tar",
	".tbp": "application/x-timbuktu",
	".tbt": "application/x-timbuktu",
	".tgz": "application/x-compressed",
	".thm": "application/vnd.eri.thm",
	".tki": "application/x-tkined",
	".tkined": "application/x-tkined",
	".toc": "application/toc",
	".toy": "image/toy",
	".trk": "x-lml/x-gps",
	".trm": "application/x-msterminal",
	".tsi": "audio/tsplayer",
	".tsp": "application/dsptype",
	".ttf": "application/octet-stream",
	".ttz": "application/t-time",
	".uls": "text/iuls",
	".ult": "audio/x-mod",
	".uu": "application/x-uuencode",
	".uue": "application/x-uuencode",
	".vcf": "text/x-vcard",
	".vdo": "video/vdo",
	".vib": "audio/vib",
	".viv": "video/vivo",
	".vivo": "video/vivo",
	".vmd": "application/vocaltec-media-desc",
	".vmf": "application/vocaltec-media-file",
||||
".vmi": "application/x-dreamcast-vms-info", |
||||
".vms": "application/x-dreamcast-vms", |
||||
".vox": "audio/voxware", |
||||
".vqe": "audio/x-twinvq-plugin", |
||||
".vqf": "audio/x-twinvq", |
||||
".vql": "audio/x-twinvq", |
||||
".vre": "x-world/x-vream", |
||||
".vrt": "x-world/x-vrt", |
||||
".vrw": "x-world/x-vream", |
||||
".vts": "workbook/formulaone", |
||||
".wcm": "application/vnd.ms-works", |
||||
".wdb": "application/vnd.ms-works", |
||||
".web": "application/vnd.xara", |
||||
".wi": "image/wavelet", |
||||
".wis": "application/x-InstallShield", |
||||
".wks": "application/vnd.ms-works", |
||||
".wmd": "application/x-ms-wmd", |
||||
".wmf": "application/x-msmetafile", |
||||
".wmlscript": "text/vnd.wap.wmlscript", |
||||
".wmz": "application/x-ms-wmz", |
||||
".wpng": "image/x-up-wpng", |
||||
".wps": "application/vnd.ms-works", |
||||
".wpt": "x-lml/x-gps", |
||||
".wri": "application/x-mswrite", |
||||
".wrz": "x-world/x-vrml", |
||||
".ws": "text/vnd.wap.wmlscript", |
||||
".wsc": "application/vnd.wap.wmlscriptc", |
||||
".wv": "video/wavelet", |
||||
".wxl": "application/x-wxl", |
||||
".x-gzip": "application/x-gzip", |
||||
".xaf": "x-world/x-vrml", |
||||
".xar": "application/vnd.xara", |
||||
".xdm": "application/x-xdma", |
||||
".xdma": "application/x-xdma", |
||||
".xdw": "application/vnd.fujixerox.docuworks", |
||||
".xhtm": "application/xhtml+xml", |
||||
".xla": "application/vnd.ms-excel", |
||||
".xlc": "application/vnd.ms-excel", |
||||
".xll": "application/x-excel", |
||||
".xlm": "application/vnd.ms-excel", |
||||
".xlt": "application/vnd.ms-excel", |
||||
".xlw": "application/vnd.ms-excel", |
||||
".xm": "audio/x-mod", |
||||
".xmz": "audio/x-mod", |
||||
".xof": "x-world/x-vrml", |
||||
".xpi": "application/x-xpinstall", |
||||
".xsit": "text/xml", |
||||
".yz1": "application/x-yz1", |
||||
".z": "application/x-compress", |
||||
".zac": "application/x-zaurus-zac", |
||||
".json": "application/json", |
||||
} |
||||

// TypeByExtension returns the MIME type associated with the file extension ext,
// i.e. the file's MIME type for the HTTP Content-Type header.
func TypeByExtension(filePath string) string {
	typ := mime.TypeByExtension(path.Ext(filePath))
	if typ == "" {
		typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
	}
	return typ
}
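Editor's illustration (not part of the commit): a hedged usage sketch of TypeByExtension, which prefers the standard library's table and only falls back to the map above for legacy extensions. The import path is the SDK's usual one and is assumed here.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss" // assumed import path of the vendored package
)

func main() {
	fmt.Println(oss.TypeByExtension("index.json")) // "application/json" (stdlib or fallback table)
	fmt.Println(oss.TypeByExtension("clip.rmvb"))  // "audio/x-pn-realaudio" (fallback table only)
}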
@ -0,0 +1,69 @@
package oss

import (
	"hash"
	"io"
	"net/http"
)

// Response defines the HTTP response from OSS
type Response struct {
	StatusCode int
	Headers    http.Header
	Body       io.ReadCloser
	ClientCRC  uint64
	ServerCRC  uint64
}

func (r *Response) Read(p []byte) (n int, err error) {
	return r.Body.Read(p)
}

// Close closes the HTTP response body
func (r *Response) Close() error {
	return r.Body.Close()
}

// PutObjectRequest is the request of DoPutObject
type PutObjectRequest struct {
	ObjectKey string
	Reader    io.Reader
}

// GetObjectRequest is the request of DoGetObject
type GetObjectRequest struct {
	ObjectKey string
}

// GetObjectResult is the result of DoGetObject
type GetObjectResult struct {
	Response  *Response
	ClientCRC hash.Hash64
	ServerCRC uint64
}

// AppendObjectRequest is the request of DoAppendObject
type AppendObjectRequest struct {
	ObjectKey string
	Reader    io.Reader
	Position  int64
}

// AppendObjectResult is the result of DoAppendObject
type AppendObjectResult struct {
	NextPosition int64
	CRC          uint64
}

// UploadPartRequest is the request of DoUploadPart
type UploadPartRequest struct {
	InitResult *InitiateMultipartUploadResult
	Reader     io.Reader
	PartSize   int64
	PartNumber int
}

// UploadPartResult is the result of DoUploadPart
type UploadPartResult struct {
	Part UploadPart
}
@ -0,0 +1,474 @@
package oss

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"strconv"
)

// CopyFile performs a multipart copy of an object
//
// srcBucketName    source bucket name
// srcObjectKey     source object name
// destObjectKey    target object name in the form of bucketname.objectkey
// partSize         the part size in bytes
// options          the object's constraints. Check out function InitiateMultipartUpload.
//
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
	destBucketName := bucket.BucketName
	if partSize < MinPartSize || partSize > MaxPartSize {
		return errors.New("oss: part size invalid range (1024KB, 5GB]")
	}

	cpConf := getCpConfig(options)
	routines := getRoutines(options)

	var strVersionId string
	versionId, _ := FindOption(options, "versionId", nil)
	if versionId != nil {
		strVersionId = versionId.(string)
	}

	if cpConf != nil && cpConf.IsEnable {
		cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey, strVersionId)
		if cpFilePath != "" {
			return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
		}
	}

	return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
		partSize, options, routines)
}

func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject, versionId string) string {
	if cpConf.FilePath == "" && cpConf.DirPath != "" {
		dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
		src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
		cpFileName := getCpFileName(src, dest, versionId)
		cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
	}
	return cpConf.FilePath
}

// ----- Concurrent copy without checkpoint ---------

// copyWorkerArg defines the copy worker arguments
type copyWorkerArg struct {
	bucket        *Bucket
	imur          InitiateMultipartUploadResult
	srcBucketName string
	srcObjectKey  string
	options       []Option
	hook          copyPartHook
}

// copyPartHook is a hook for testing purposes
type copyPartHook func(part copyPart) error

var copyPartHooker copyPartHook = defaultCopyPartHook

func defaultCopyPartHook(part copyPart) error {
	return nil
}

// copyWorker runs copy jobs until the jobs channel is drained or a part fails
func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
	for chunk := range jobs {
		if err := arg.hook(chunk); err != nil {
			failed <- err
			break
		}
		chunkSize := chunk.End - chunk.Start + 1
		part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey,
			chunk.Start, chunkSize, chunk.Number, arg.options...)
		if err != nil {
			failed <- err
			break
		}
		select {
		case <-die:
			return
		default:
		}
		results <- part
	}
}

// copyScheduler feeds all parts into the jobs channel, then closes it
func copyScheduler(jobs chan copyPart, parts []copyPart) {
	for _, part := range parts {
		jobs <- part
	}
	close(jobs)
}

// copyPart structure
type copyPart struct {
	Number int   // Part number (from 1 to 10,000)
	Start  int64 // The start index in the source file
	End    int64 // The end index in the source file
}

// getCopyParts calculates the copy parts
func getCopyParts(objectSize, partSize int64) []copyPart {
	parts := []copyPart{}
	part := copyPart{}
	i := 0
	for offset := int64(0); offset < objectSize; offset += partSize {
		part.Number = i + 1
		part.Start = offset
		part.End = GetPartEnd(offset, objectSize, partSize)
		parts = append(parts, part)
		i++
	}
	return parts
}
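// Worked example (editor's illustration, not part of the commit): for a
// 10 MiB object copied with a 4 MiB partSize, getCopyParts yields
//   {Number: 1, Start: 0,       End: 4194303}
//   {Number: 2, Start: 4194304, End: 8388607}
//   {Number: 3, Start: 8388608, End: 10485759}
// so the final part carries the 2 MiB remainder, as computed by GetPartEnd.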

// getSrcObjectBytes gets the source object size
func getSrcObjectBytes(parts []copyPart) int64 {
	var ob int64
	for _, part := range parts {
		ob += (part.End - part.Start + 1)
	}
	return ob
}

// copyFile performs a concurrent copy without checkpoint
func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
	partSize int64, options []Option, routines int) error {
	descBucket, err := bucket.Client.Bucket(destBucketName)
	srcBucket, err := bucket.Client.Bucket(srcBucketName)
	listener := GetProgressListener(options)

	// choose the valid options for each stage
	headerOptions := ChoiceHeadObjectOption(options)
	partOptions := ChoiceTransferPartOption(options)
	completeOptions := ChoiceCompletePartOption(options)
	abortOptions := ChoiceAbortPartOption(options)

	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
	if err != nil {
		return err
	}

	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
	if err != nil {
		return err
	}

	// Get copy parts
	parts := getCopyParts(objectSize, partSize)
	// Initialize the multipart upload
	imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
	if err != nil {
		return err
	}

	jobs := make(chan copyPart, len(parts))
	results := make(chan UploadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	var completedBytes int64
	totalBytes := getSrcObjectBytes(parts)
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
	publishProgress(listener, event)

	// Start the copy workers
	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
	for w := 1; w <= routines; w++ {
		go copyWorker(w, arg, jobs, results, failed, die)
	}

	// Start the scheduler
	go copyScheduler(jobs, parts)

	// Wait for the parts to finish.
	completed := 0
	ups := make([]UploadPart, len(parts))
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			ups[part.PartNumber-1] = part
			copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
			completedBytes += copyBytes
			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, copyBytes)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			descBucket.AbortMultipartUpload(imur, abortOptions...)
			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
			publishProgress(listener, event)
			return err
		}

		if completed >= len(parts) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
	publishProgress(listener, event)

	// Complete the multipart upload
	_, err = descBucket.CompleteMultipartUpload(imur, ups, completeOptions...)
	if err != nil {
		bucket.AbortMultipartUpload(imur, abortOptions...)
		return err
	}
	return nil
}

// ----- Concurrent copy with checkpoint -----

const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"

type copyCheckpoint struct {
	Magic          string       // Magic
	MD5            string       // CP content MD5
	SrcBucketName  string       // Source bucket
	SrcObjectKey   string       // Source object
	DestBucketName string       // Target bucket
	DestObjectKey  string       // Target object
	CopyID         string       // Copy ID
	ObjStat        objectStat   // Object stat
	Parts          []copyPart   // Copy parts
	CopyParts      []UploadPart // The uploaded parts
	PartStat       []bool       // The part status
}

// isValid checks that the CP is valid and the source object has not been updated.
func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
	// Compare the CP's magic number and MD5.
	cpb := cp
	cpb.MD5 = ""
	js, _ := json.Marshal(cpb)
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])

	// The magic must match what prepare() writes (copyCpMagic, not the download magic).
	if cp.Magic != copyCpMagic || b64 != cp.MD5 {
		return false, nil
	}

	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
	if err != nil {
		return false, err
	}

	// Compare the object size, last modified time and etag.
	if cp.ObjStat.Size != objectSize ||
		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
		return false, nil
	}

	return true, nil
}

// load loads from the checkpoint file
func (cp *copyCheckpoint) load(filePath string) error {
	contents, err := ioutil.ReadFile(filePath)
	if err != nil {
		return err
	}

	err = json.Unmarshal(contents, cp)
	return err
}

// update updates the status of the given part
func (cp *copyCheckpoint) update(part UploadPart) {
	cp.CopyParts[part.PartNumber-1] = part
	cp.PartStat[part.PartNumber-1] = true
}

// dump dumps the CP to the file
func (cp *copyCheckpoint) dump(filePath string) error {
	bcp := *cp

	// Calculate MD5
	bcp.MD5 = ""
	js, err := json.Marshal(bcp)
	if err != nil {
		return err
	}
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])
	bcp.MD5 = b64

	// Serialize
	js, err = json.Marshal(bcp)
	if err != nil {
		return err
	}

	// Dump
	return ioutil.WriteFile(filePath, js, FilePermMode)
}

// todoParts returns the unfinished parts
func (cp copyCheckpoint) todoParts() []copyPart {
	dps := []copyPart{}
	for i, ps := range cp.PartStat {
		if !ps {
			dps = append(dps, cp.Parts[i])
		}
	}
	return dps
}

// getCompletedBytes returns the count of finished bytes
func (cp copyCheckpoint) getCompletedBytes() int64 {
	var completedBytes int64
	for i, part := range cp.Parts {
		if cp.PartStat[i] {
			completedBytes += (part.End - part.Start + 1)
		}
	}
	return completedBytes
}

// prepare initializes the multipart upload
func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
	partSize int64, options []Option) error {
	// CP
	cp.Magic = copyCpMagic
	cp.SrcBucketName = srcBucket.BucketName
	cp.SrcObjectKey = srcObjectKey
	cp.DestBucketName = destBucket.BucketName
	cp.DestObjectKey = destObjectKey

	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
	if err != nil {
		return err
	}

	cp.ObjStat.Size = objectSize
	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)

	// Parts
	cp.Parts = getCopyParts(objectSize, partSize)
	cp.PartStat = make([]bool, len(cp.Parts))
	for i := range cp.PartStat {
		cp.PartStat[i] = false
	}
	cp.CopyParts = make([]UploadPart, len(cp.Parts))

	// Init copy
	imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
	if err != nil {
		return err
	}
	cp.CopyID = imur.UploadID

	return nil
}

func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
	imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
		Key: cp.DestObjectKey, UploadID: cp.CopyID}
	_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
	if err != nil {
		return err
	}
	os.Remove(cpFilePath)
	return err
}

// copyFileWithCp performs a concurrent copy with checkpoint
func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
	partSize int64, options []Option, cpFilePath string, routines int) error {
	descBucket, err := bucket.Client.Bucket(destBucketName)
	srcBucket, err := bucket.Client.Bucket(srcBucketName)
	listener := GetProgressListener(options)

	// Load the CP data
	ccp := copyCheckpoint{}
	err = ccp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}

	// choose the valid options for each stage
	headerOptions := ChoiceHeadObjectOption(options)
	partOptions := ChoiceTransferPartOption(options)
	completeOptions := ChoiceCompletePartOption(options)

	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
	if err != nil {
		return err
	}

	// Load error or invalid CP data -- reinitialize
	valid, err := ccp.isValid(meta)
	if err != nil || !valid {
		if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}

	// Unfinished parts
	parts := ccp.todoParts()
	imur := InitiateMultipartUploadResult{
		Bucket:   destBucketName,
		Key:      destObjectKey,
		UploadID: ccp.CopyID}

	jobs := make(chan copyPart, len(parts))
	results := make(chan UploadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	completedBytes := ccp.getCompletedBytes()
	event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size, 0)
	publishProgress(listener, event)

	// Start the worker goroutines
	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
	for w := 1; w <= routines; w++ {
		go copyWorker(w, arg, jobs, results, failed, die)
	}

	// Start the scheduler
	go copyScheduler(jobs, parts)

	// Wait for the parts to complete.
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			ccp.update(part)
			ccp.dump(cpFilePath)
			copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
			completedBytes += copyBytes
			event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size, copyBytes)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size, 0)
			publishProgress(listener, event)
			return err
		}

		if completed >= len(parts) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size, 0)
	publishProgress(listener, event)

	return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, completeOptions)
}
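Editor's illustration (not part of the commit): a hedged sketch of driving CopyFile above with a resumable checkpoint. Endpoint, keys, and bucket/object names are placeholders; the Routines and Checkpoint options are the ones defined later in this vendored SDK.

package main

import "github.com/aliyun/aliyun-oss-go-sdk/oss"

func main() {
	client, err := oss.New("<endpoint>", "<access_key_id>", "<secret_access_key>")
	if err != nil {
		panic(err)
	}
	bucket, err := client.Bucket("<dest-bucket>")
	if err != nil {
		panic(err)
	}
	// Copy "src.obj" from <src-bucket> into this bucket in 1 MiB parts,
	// resuming from the checkpoint file if one exists.
	err = bucket.CopyFile("<src-bucket>", "src.obj", "dst.obj", 1024*1024,
		oss.Routines(3), oss.Checkpoint(true, "copy.cp"))
	if err != nil {
		panic(err)
	}
}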
@ -0,0 +1,305 @@
package oss

import (
	"bytes"
	"encoding/xml"
	"io"
	"net/http"
	"net/url"
	"os"
	"sort"
	"strconv"
)

// InitiateMultipartUpload initializes a multipart upload
//
// objectKey    object name
// options      the object constraints for the upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
//              ServerSideEncryption and Meta; check out the following link:
//              https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
//
// InitiateMultipartUploadResult    the return value of InitiateMultipartUpload, used by later calls such as UploadPartFromFile and UploadPartCopy.
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
	var imur InitiateMultipartUploadResult
	opts := AddContentType(options, objectKey)
	params, _ := GetRawParams(options)
	paramKeys := []string{"sequential", "withHashContext", "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256"}
	ConvertEmptyValueToNil(params, paramKeys)
	params["uploads"] = nil

	resp, err := bucket.do("POST", objectKey, params, opts, nil, nil)
	if err != nil {
		return imur, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &imur)
	return imur, err
}

// UploadPart uploads one part
//
// After initializing a multipart upload, the upload ID and object key can be used for uploading the parts.
// Each part has a part number (ranging from 1 to 10,000), and for a given upload ID the part number identifies the position of the part in the whole file.
// Uploading again with the same part number and upload ID therefore overwrites that part's data.
// Except for the last one, the minimal part size is 100KB. There's no limit on the last part's size.
//
// imur    the returned value of InitiateMultipartUpload.
// reader    io.Reader, the reader for the part's data.
// size    the part size.
// partNumber    the part number (ranging from 1 to 10,000). An invalid part number leads to an InvalidArgument error.
//
// UploadPart    the return value of uploading the part. It consists of PartNumber and ETag. It's valid when error is nil.
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
	partSize int64, partNumber int, options ...Option) (UploadPart, error) {
	request := &UploadPartRequest{
		InitResult: &imur,
		Reader:     reader,
		PartSize:   partSize,
		PartNumber: partNumber,
	}

	result, err := bucket.DoUploadPart(request, options)

	return result.Part, err
}

// UploadPartFromFile uploads one part from a local file.
//
// imur    the return value of a successful InitiateMultipartUpload.
// filePath    the local file path to upload.
// startPosition    the start position in the local file.
// partSize    the part size.
// partNumber    the part number (from 1 to 10,000)
//
// UploadPart    the return value consists of PartNumber and ETag.
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
	var part = UploadPart{}
	fd, err := os.Open(filePath)
	if err != nil {
		return part, err
	}
	defer fd.Close()
	fd.Seek(startPosition, os.SEEK_SET)

	request := &UploadPartRequest{
		InitResult: &imur,
		Reader:     fd,
		PartSize:   partSize,
		PartNumber: partNumber,
	}

	result, err := bucket.DoUploadPart(request, options)

	return result.Part, err
}

// DoUploadPart does the actual part upload.
//
// request    part upload request
//
// UploadPartResult    the result of uploading the part.
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
	listener := GetProgressListener(options)
	options = append(options, ContentLength(request.PartSize))
	params := map[string]interface{}{}
	params["partNumber"] = strconv.Itoa(request.PartNumber)
	params["uploadId"] = request.InitResult.UploadID
	resp, err := bucket.do("PUT", request.InitResult.Key, params, options,
		&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
	if err != nil {
		return &UploadPartResult{}, err
	}
	defer resp.Body.Close()

	part := UploadPart{
		ETag:       resp.Headers.Get(HTTPHeaderEtag),
		PartNumber: request.PartNumber,
	}

	if bucket.GetConfig().IsEnableCRC {
		err = CheckCRC(resp, "DoUploadPart")
		if err != nil {
			return &UploadPartResult{part}, err
		}
	}

	return &UploadPartResult{part}, nil
}

// UploadPartCopy uploads one part by copying from a source object
//
// imur    the return value of InitiateMultipartUpload
// srcBucketName    source bucket name
// srcObjectKey     source object name
// startPosition    the part's start index in the source file
// partSize    the part size
// partNumber    the part number, ranging from 1 to 10,000. If it exceeds the range, OSS returns an InvalidArgument error.
// options    the constraints on the source object for the copy. The copy happens only when these constraints are met; otherwise it returns an error.
//            CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince; check out the following link for details:
//            https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
//
// UploadPart    the return value consists of PartNumber and ETag.
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
	var out UploadPartCopyResult
	var part UploadPart
	var opts []Option

	// First find the version id
	versionIdKey := "versionId"
	versionId, _ := FindOption(options, versionIdKey, nil)
	if versionId == nil {
		opts = []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)),
			CopySourceRange(startPosition, partSize)}
	} else {
		opts = []Option{CopySourceVersion(srcBucketName, url.QueryEscape(srcObjectKey), versionId.(string)),
			CopySourceRange(startPosition, partSize)}
		options = DeleteOption(options, versionIdKey)
	}

	opts = append(opts, options...)

	params := map[string]interface{}{}
	params["partNumber"] = strconv.Itoa(partNumber)
	params["uploadId"] = imur.UploadID
	resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil)
	if err != nil {
		return part, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	if err != nil {
		return part, err
	}
	part.ETag = out.ETag
	part.PartNumber = partNumber

	return part, nil
}

// CompleteMultipartUpload completes the multipart upload.
//
// imur    the return value of InitiateMultipartUpload.
// parts    the array of return values from UploadPart/UploadPartFromFile/UploadPartCopy.
//
// CompleteMultipartUploadResponse    the return value when the call succeeds. Only valid when the error is nil.
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
	parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) {
	var out CompleteMultipartUploadResult

	sort.Sort(UploadParts(parts))
	cxml := completeMultipartUploadXML{}
	cxml.Part = parts
	bs, err := xml.Marshal(cxml)
	if err != nil {
		return out, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)

	params := map[string]interface{}{}
	params["uploadId"] = imur.UploadID
	resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	return out, err
}

// AbortMultipartUpload aborts the multipart upload.
//
// imur    the return value of InitiateMultipartUpload.
//
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error {
	params := map[string]interface{}{}
	params["uploadId"] = imur.UploadID
	resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}

// ListUploadedParts lists the uploaded parts.
//
// imur    the return value of InitiateMultipartUpload.
//
// ListUploadedPartsResponse    the return value if it succeeds, only valid when error is nil.
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) {
	var out ListUploadedPartsResult
	options = append(options, EncodingType("url"))

	params, err := GetRawParams(options)
	if err != nil {
		return out, err
	}

	params["uploadId"] = imur.UploadID
	resp, err := bucket.do("GET", imur.Key, params, options, nil, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	if err != nil {
		return out, err
	}
	err = decodeListUploadedPartsResult(&out)
	return out, err
}

// ListMultipartUploads lists all ongoing multipart upload tasks
//
// options    listObject's filters. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order;
//            MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys.
//
// ListMultipartUploadResponse    the return value if it succeeds, only valid when error is nil.
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
	var out ListMultipartUploadResult

	options = append(options, EncodingType("url"))
	params, err := GetRawParams(options)
	if err != nil {
		return out, err
	}
	params["uploads"] = nil

	resp, err := bucket.do("GET", "", params, options, nil, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	if err != nil {
		return out, err
	}
	err = decodeListMultipartUploadResult(&out)
	return out, err
}
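Editor's illustration (not part of the commit): a hedged sketch of the Initiate -> UploadPart -> Complete flow defined above. Endpoint, keys, and names are placeholders, and error handling is collapsed to panics for brevity.

package main

import (
	"strings"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("<endpoint>", "<access_key_id>", "<secret_access_key>")
	if err != nil {
		panic(err)
	}
	bucket, err := client.Bucket("<bucket>")
	if err != nil {
		panic(err)
	}

	imur, err := bucket.InitiateMultipartUpload("big.obj")
	if err != nil {
		panic(err)
	}

	// Upload a single tiny part for illustration; real callers split the
	// payload into parts of at least 100KB each (except the last one).
	data := strings.NewReader("hello parts")
	part, err := bucket.UploadPart(imur, data, int64(data.Len()), 1)
	if err != nil {
		bucket.AbortMultipartUpload(imur) // release server-side state on failure
		panic(err)
	}

	if _, err = bucket.CompleteMultipartUpload(imur, []oss.UploadPart{part}); err != nil {
		panic(err)
	}
}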
@ -0,0 +1,689 @@
package oss

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"
)

type optionType string

const (
	optionParam optionType = "HTTPParameter" // URL parameter
	optionHTTP  optionType = "HTTPHeader"    // HTTP header
	optionArg   optionType = "FuncArgument"  // Function argument
)

const (
	deleteObjectsQuiet = "delete-objects-quiet"
	routineNum         = "x-routine-num"
	checkpointConfig   = "x-cp-config"
	initCRC64          = "init-crc64"
	progressListener   = "x-progress-listener"
	storageClass       = "storage-class"
	responseHeader     = "x-response-header"
	redundancyType     = "redundancy-type"
	objectHashFunc     = "object-hash-func"
)

type (
	optionValue struct {
		Value interface{}
		Type  optionType
	}

	// Option is an HTTP option
	Option func(map[string]optionValue) error
)

// ACL is an option to set X-Oss-Acl header
func ACL(acl ACLType) Option {
	return setHeader(HTTPHeaderOssACL, string(acl))
}

// ContentType is an option to set Content-Type header
func ContentType(value string) Option {
	return setHeader(HTTPHeaderContentType, value)
}

// ContentLength is an option to set Content-Length header
func ContentLength(length int64) Option {
	return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10))
}

// CacheControl is an option to set Cache-Control header
func CacheControl(value string) Option {
	return setHeader(HTTPHeaderCacheControl, value)
}

// ContentDisposition is an option to set Content-Disposition header
func ContentDisposition(value string) Option {
	return setHeader(HTTPHeaderContentDisposition, value)
}

// ContentEncoding is an option to set Content-Encoding header
func ContentEncoding(value string) Option {
	return setHeader(HTTPHeaderContentEncoding, value)
}

// ContentLanguage is an option to set Content-Language header
func ContentLanguage(value string) Option {
	return setHeader(HTTPHeaderContentLanguage, value)
}

// ContentMD5 is an option to set Content-MD5 header
func ContentMD5(value string) Option {
	return setHeader(HTTPHeaderContentMD5, value)
}

// Expires is an option to set Expires header
func Expires(t time.Time) Option {
	return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat))
}

// Meta is an option to set Meta header
func Meta(key, value string) Option {
	return setHeader(HTTPHeaderOssMetaPrefix+key, value)
}

// Range is an option to set Range header, [start, end]
func Range(start, end int64) Option {
	return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end))
}

// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048
func NormalizedRange(nr string) Option {
	return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr)))
}

// AcceptEncoding is an option to set Accept-Encoding header
func AcceptEncoding(value string) Option {
	return setHeader(HTTPHeaderAcceptEncoding, value)
}

// IfModifiedSince is an option to set If-Modified-Since header
func IfModifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat))
}

// IfUnmodifiedSince is an option to set If-Unmodified-Since header
func IfUnmodifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat))
}

// IfMatch is an option to set If-Match header
func IfMatch(value string) Option {
	return setHeader(HTTPHeaderIfMatch, value)
}

// IfNoneMatch is an option to set If-None-Match header
func IfNoneMatch(value string) Option {
	return setHeader(HTTPHeaderIfNoneMatch, value)
}

// CopySource is an option to set X-Oss-Copy-Source header
func CopySource(sourceBucket, sourceObject string) Option {
	return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject)
}

// CopySourceVersion is an option to set X-Oss-Copy-Source header, including the versionId
func CopySourceVersion(sourceBucket, sourceObject string, versionId string) Option {
	return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject+"?"+"versionId="+versionId)
}

// CopySourceRange is an option to set X-Oss-Copy-Source-Range header
func CopySourceRange(startPosition, partSize int64) Option {
	val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" +
		strconv.FormatInt((startPosition+partSize-1), 10)
	return setHeader(HTTPHeaderOssCopySourceRange, val)
}

// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header
func CopySourceIfMatch(value string) Option {
	return setHeader(HTTPHeaderOssCopySourceIfMatch, value)
}

// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header
func CopySourceIfNoneMatch(value string) Option {
	return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value)
}

// CopySourceIfModifiedSince is an option to set X-Oss-Copy-Source-If-Modified-Since header
func CopySourceIfModifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat))
}

// CopySourceIfUnmodifiedSince is an option to set X-Oss-Copy-Source-If-Unmodified-Since header
func CopySourceIfUnmodifiedSince(t time.Time) Option {
	return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat))
}

// MetadataDirective is an option to set X-Oss-Metadata-Directive header
func MetadataDirective(directive MetadataDirectiveType) Option {
	return setHeader(HTTPHeaderOssMetadataDirective, string(directive))
}

// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header
func ServerSideEncryption(value string) Option {
	return setHeader(HTTPHeaderOssServerSideEncryption, value)
}

// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header
func ServerSideEncryptionKeyID(value string) Option {
	return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value)
}

// ServerSideDataEncryption is an option to set X-Oss-Server-Side-Data-Encryption header
func ServerSideDataEncryption(value string) Option {
	return setHeader(HTTPHeaderOssServerSideDataEncryption, value)
}

// SSECAlgorithm is an option to set X-Oss-Server-Side-Encryption-Customer-Algorithm header
func SSECAlgorithm(value string) Option {
	return setHeader(HTTPHeaderSSECAlgorithm, value)
}

// SSECKey is an option to set X-Oss-Server-Side-Encryption-Customer-Key header
func SSECKey(value string) Option {
	return setHeader(HTTPHeaderSSECKey, value)
}

// SSECKeyMd5 is an option to set X-Oss-Server-Side-Encryption-Customer-Key-Md5 header
func SSECKeyMd5(value string) Option {
	return setHeader(HTTPHeaderSSECKeyMd5, value)
}

// ObjectACL is an option to set X-Oss-Object-Acl header
func ObjectACL(acl ACLType) Option {
	return setHeader(HTTPHeaderOssObjectACL, string(acl))
}

// symlinkTarget is an option to set X-Oss-Symlink-Target header
func symlinkTarget(targetObjectKey string) Option {
	return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey)
}

// Origin is an option to set Origin header
func Origin(value string) Option {
	return setHeader(HTTPHeaderOrigin, value)
}

// ObjectStorageClass is an option to set the storage class of an object
func ObjectStorageClass(storageClass StorageClassType) Option {
	return setHeader(HTTPHeaderOssStorageClass, string(storageClass))
}

// Callback is an option to set callback values
func Callback(callback string) Option {
	return setHeader(HTTPHeaderOssCallback, callback)
}

// CallbackVar is an option to set user-defined callback values
func CallbackVar(callbackVar string) Option {
	return setHeader(HTTPHeaderOssCallbackVar, callbackVar)
}

// RequestPayer is an option to set the payer who pays for the request
func RequestPayer(payerType PayerType) Option {
	return setHeader(HTTPHeaderOssRequester, strings.ToLower(string(payerType)))
}

// RequestPayerParam is an option to set the payer who pays for the request, as a URL parameter
func RequestPayerParam(payerType PayerType) Option {
	return addParam(strings.ToLower(HTTPHeaderOssRequester), strings.ToLower(string(payerType)))
}

// SetTagging is an option to set object tagging
func SetTagging(tagging Tagging) Option {
	if len(tagging.Tags) == 0 {
		return nil
	}

	taggingValue := ""
	for index, tag := range tagging.Tags {
		if index != 0 {
			taggingValue += "&"
		}
		taggingValue += url.QueryEscape(tag.Key) + "=" + url.QueryEscape(tag.Value)
	}
	return setHeader(HTTPHeaderOssTagging, taggingValue)
}

// TaggingDirective is an option to set X-Oss-Tagging-Directive header
func TaggingDirective(directive TaggingDirectiveType) Option {
	return setHeader(HTTPHeaderOssTaggingDirective, string(directive))
}

// ACReqMethod is an option to set Access-Control-Request-Method header
func ACReqMethod(value string) Option {
	return setHeader(HTTPHeaderACReqMethod, value)
}

// ACReqHeaders is an option to set Access-Control-Request-Headers header
func ACReqHeaders(value string) Option {
	return setHeader(HTTPHeaderACReqHeaders, value)
}

// TrafficLimitHeader is an option to set X-Oss-Traffic-Limit header
func TrafficLimitHeader(value int64) Option {
	return setHeader(HTTPHeaderOssTrafficLimit, strconv.FormatInt(value, 10))
}

// UserAgentHeader is an option to set HTTPHeaderUserAgent
func UserAgentHeader(ua string) Option {
	return setHeader(HTTPHeaderUserAgent, ua)
}

// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite header
func ForbidOverWrite(forbidWrite bool) Option {
	if forbidWrite {
		return setHeader(HTTPHeaderOssForbidOverWrite, "true")
	}
	return setHeader(HTTPHeaderOssForbidOverWrite, "false")
}

// RangeBehavior is an option to set the Range behavior value, such as "standard"
func RangeBehavior(value string) Option {
	return setHeader(HTTPHeaderOssRangeBehavior, value)
}

// PartHashCtxHeader is an option to set X-Oss-Hash-Ctx header
func PartHashCtxHeader(value string) Option {
	return setHeader(HTTPHeaderOssHashCtx, value)
}

// PartMd5CtxHeader is an option to set X-Oss-Md5-Ctx header
func PartMd5CtxHeader(value string) Option {
	return setHeader(HTTPHeaderOssMd5Ctx, value)
}

// PartHashCtxParam is an option to set the x-oss-hash-ctx parameter
func PartHashCtxParam(value string) Option {
	return addParam("x-oss-hash-ctx", value)
}

// PartMd5CtxParam is an option to set the x-oss-md5-ctx parameter
func PartMd5CtxParam(value string) Option {
	return addParam("x-oss-md5-ctx", value)
}

// Delimiter is an option to set the delimiter parameter
func Delimiter(value string) Option {
	return addParam("delimiter", value)
}

// Marker is an option to set the marker parameter
func Marker(value string) Option {
	return addParam("marker", value)
}

// MaxKeys is an option to set the max-keys parameter
func MaxKeys(value int) Option {
	return addParam("max-keys", strconv.Itoa(value))
}

// Prefix is an option to set the prefix parameter
func Prefix(value string) Option {
	return addParam("prefix", value)
}

// EncodingType is an option to set the encoding-type parameter
func EncodingType(value string) Option {
	return addParam("encoding-type", value)
}

// MaxUploads is an option to set the max-uploads parameter
func MaxUploads(value int) Option {
	return addParam("max-uploads", strconv.Itoa(value))
}

// KeyMarker is an option to set the key-marker parameter
func KeyMarker(value string) Option {
	return addParam("key-marker", value)
}

// VersionIdMarker is an option to set the version-id-marker parameter
func VersionIdMarker(value string) Option {
	return addParam("version-id-marker", value)
}

// VersionId is an option to set the versionId parameter
func VersionId(value string) Option {
	return addParam("versionId", value)
}

// TagKey is an option to set the tag-key parameter
func TagKey(value string) Option {
	return addParam("tag-key", value)
}

// TagValue is an option to set the tag-value parameter
func TagValue(value string) Option {
	return addParam("tag-value", value)
}

// UploadIDMarker is an option to set the upload-id-marker parameter
func UploadIDMarker(value string) Option {
	return addParam("upload-id-marker", value)
}

// MaxParts is an option to set the max-parts parameter
func MaxParts(value int) Option {
	return addParam("max-parts", strconv.Itoa(value))
}

// PartNumberMarker is an option to set the part-number-marker parameter
func PartNumberMarker(value int) Option {
	return addParam("part-number-marker", strconv.Itoa(value))
}

// Sequential is an option to set the sequential parameter for InitiateMultipartUpload
func Sequential() Option {
	return addParam("sequential", "")
}

// WithHashContext is an option to set the withHashContext parameter for InitiateMultipartUpload
func WithHashContext() Option {
	return addParam("withHashContext", "")
}

// EnableMd5 is an option to set the x-oss-enable-md5 parameter for InitiateMultipartUpload
func EnableMd5() Option {
	return addParam("x-oss-enable-md5", "")
}

// EnableSha1 is an option to set the x-oss-enable-sha1 parameter for InitiateMultipartUpload
func EnableSha1() Option {
	return addParam("x-oss-enable-sha1", "")
}

// EnableSha256 is an option to set the x-oss-enable-sha256 parameter for InitiateMultipartUpload
func EnableSha256() Option {
	return addParam("x-oss-enable-sha256", "")
}

// ListType is an option to set the list-type parameter for ListObjectsV2
func ListType(value int) Option {
	return addParam("list-type", strconv.Itoa(value))
}

// StartAfter is an option to set the start-after parameter for ListObjectsV2
func StartAfter(value string) Option {
	return addParam("start-after", value)
}

// ContinuationToken is an option to set the continuation-token parameter for ListObjectsV2
func ContinuationToken(value string) Option {
	if value == "" {
		return addParam("continuation-token", nil)
	}
	return addParam("continuation-token", value)
}

// FetchOwner is an option to set the fetch-owner parameter for ListObjectsV2
func FetchOwner(value bool) Option {
	if value {
		return addParam("fetch-owner", "true")
	}
	return addParam("fetch-owner", "false")
}

// DeleteObjectsQuiet false: DeleteObjects runs in verbose mode; true: DeleteObjects runs in quiet mode. Default is false.
func DeleteObjectsQuiet(isQuiet bool) Option {
	return addArg(deleteObjectsQuiet, isQuiet)
}

// StorageClass is an option to set the bucket storage class
func StorageClass(value StorageClassType) Option {
	return addArg(storageClass, value)
}

// RedundancyType is an option to set the bucket data redundancy type
func RedundancyType(value DataRedundancyType) Option {
	return addArg(redundancyType, value)
}

// ObjectHashFunc is an option to set the object hash function
func ObjectHashFunc(value ObjecthashFuncType) Option {
	return addArg(objectHashFunc, value)
}

// Checkpoint configuration
type cpConfig struct {
	IsEnable bool
	FilePath string
	DirPath  string
}

// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
func Checkpoint(isEnable bool, filePath string) Option {
	return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
}

// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile.
func CheckpointDir(isEnable bool, dirPath string) Option {
	return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath})
}

// Routines sets the routine count for DownloadFile/UploadFile
func Routines(n int) Option {
	return addArg(routineNum, n)
}

// InitCRC sets the initial CRC for AppendObject
func InitCRC(initCRC uint64) Option {
	return addArg(initCRC64, initCRC)
}

// Progress sets the progress listener
func Progress(listener ProgressListener) Option {
	return addArg(progressListener, listener)
}

// GetResponseHeader is an option to capture the response HTTP header
func GetResponseHeader(respHeader *http.Header) Option {
	return addArg(responseHeader, respHeader)
}

// ResponseContentType is an option to set the response-content-type parameter
func ResponseContentType(value string) Option {
	return addParam("response-content-type", value)
}

// ResponseContentLanguage is an option to set the response-content-language parameter
func ResponseContentLanguage(value string) Option {
	return addParam("response-content-language", value)
}

// ResponseExpires is an option to set the response-expires parameter
func ResponseExpires(value string) Option {
	return addParam("response-expires", value)
}

// ResponseCacheControl is an option to set the response-cache-control parameter
func ResponseCacheControl(value string) Option {
	return addParam("response-cache-control", value)
}

// ResponseContentDisposition is an option to set the response-content-disposition parameter
func ResponseContentDisposition(value string) Option {
	return addParam("response-content-disposition", value)
}

// ResponseContentEncoding is an option to set the response-content-encoding parameter
func ResponseContentEncoding(value string) Option {
	return addParam("response-content-encoding", value)
}

// Process is an option to set the x-oss-process parameter
func Process(value string) Option {
	return addParam("x-oss-process", value)
}

// TrafficLimitParam is an option to set the x-oss-traffic-limit parameter
func TrafficLimitParam(value int64) Option {
	return addParam("x-oss-traffic-limit", strconv.FormatInt(value, 10))
}

// SetHeader allows users to set custom HTTP headers
func SetHeader(key string, value interface{}) Option {
	return setHeader(key, value)
}

// AddParam allows users to set custom HTTP parameters
func AddParam(key string, value interface{}) Option {
	return addParam(key, value)
}

func setHeader(key string, value interface{}) Option {
	return func(params map[string]optionValue) error {
		if value == nil {
			return nil
		}
		params[key] = optionValue{value, optionHTTP}
		return nil
	}
}

func addParam(key string, value interface{}) Option {
	return func(params map[string]optionValue) error {
		if value == nil {
			return nil
		}
		params[key] = optionValue{value, optionParam}
		return nil
	}
}

func addArg(key string, value interface{}) Option {
	return func(params map[string]optionValue) error {
		if value == nil {
			return nil
		}
		params[key] = optionValue{value, optionArg}
		return nil
	}
}

func handleOptions(headers map[string]string, options []Option) error {
	params := map[string]optionValue{}
	for _, option := range options {
		if option != nil {
			if err := option(params); err != nil {
				return err
			}
		}
	}

	for k, v := range params {
		if v.Type == optionHTTP {
			headers[k] = v.Value.(string)
		}
	}
	return nil
}
|
||||
func GetRawParams(options []Option) (map[string]interface{}, error) { |
||||
// Option
|
||||
params := map[string]optionValue{} |
||||
for _, option := range options { |
||||
if option != nil { |
||||
if err := option(params); err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
} |
||||
|
||||
paramsm := map[string]interface{}{} |
||||
// Serialize
|
||||
for k, v := range params { |
||||
if v.Type == optionParam { |
||||
vs := params[k] |
||||
paramsm[k] = vs.Value.(string) |
||||
} |
||||
} |
||||
|
||||
return paramsm, nil |
||||
} |
||||
|
||||
func FindOption(options []Option, param string, defaultVal interface{}) (interface{}, error) { |
||||
params := map[string]optionValue{} |
||||
for _, option := range options { |
||||
if option != nil { |
||||
if err := option(params); err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
} |
||||
|
||||
if val, ok := params[param]; ok { |
||||
return val.Value, nil |
||||
} |
||||
return defaultVal, nil |
||||
} |
||||
|
||||
func IsOptionSet(options []Option, option string) (bool, interface{}, error) { |
||||
params := map[string]optionValue{} |
||||
for _, option := range options { |
||||
if option != nil { |
||||
if err := option(params); err != nil { |
||||
return false, nil, err |
||||
} |
||||
} |
||||
} |
||||
|
||||
if val, ok := params[option]; ok { |
||||
return true, val.Value, nil |
||||
} |
||||
return false, nil, nil |
||||
} |
||||
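
// The sketch below is not part of the original file; it illustrates how these
// functional options compose. Every Option writes into a shared params map,
// and helpers such as FindOption replay the options to read a value back.
// exampleOptionLookup is a hypothetical name, and "fmt" is assumed to be
// among this file's imports.
func exampleOptionLookup() {
	opts := []Option{
		Process("csv/select"),
		SetHeader("x-oss-meta-owner", "loki"),
	}
	// FindOption replays every option into a fresh map and returns the value
	// stored under the given key, or the supplied default.
	if val, err := FindOption(opts, "x-oss-process", nil); err == nil && val != nil {
		fmt.Println(val.(string)) // prints "csv/select"
	}
}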

func DeleteOption(options []Option, strKey string) []Option {
	var outOption []Option
	params := map[string]optionValue{}
	for _, option := range options {
		if option != nil {
			option(params)
			_, exist := params[strKey]
			if !exist {
				outOption = append(outOption, option)
			} else {
				delete(params, strKey)
			}
		}
	}
	return outOption
}

func GetRequestId(header http.Header) string {
	return header.Get("x-oss-request-id")
}

func GetVersionId(header http.Header) string {
	return header.Get("x-oss-version-id")
}

func GetCopySrcVersionId(header http.Header) string {
	return header.Get("x-oss-copy-source-version-id")
}

func GetDeleteMark(header http.Header) bool {
	value := header.Get("x-oss-delete-marker")
	return strings.ToUpper(value) == "TRUE"
}

func GetQosDelayTime(header http.Header) string {
	return header.Get("x-oss-qos-delay-time")
}

// AllowSameActionOverLap is an option to set the allow-same-action-overlap header.
func AllowSameActionOverLap(enabled bool) Option {
	if enabled {
		return setHeader(HTTPHeaderAllowSameActionOverLap, "true")
	}
	return setHeader(HTTPHeaderAllowSameActionOverLap, "false")
}
@ -0,0 +1,116 @@
package oss

import (
	"io"
)

// ProgressEventType defines the transfer progress event type
type ProgressEventType int

const (
	// TransferStartedEvent transfer started, sets TotalBytes
	TransferStartedEvent ProgressEventType = 1 + iota
	// TransferDataEvent transfer data, sets ConsumedBytes and TotalBytes
	TransferDataEvent
	// TransferCompletedEvent transfer completed
	TransferCompletedEvent
	// TransferFailedEvent transfer encountered an error
	TransferFailedEvent
)

// ProgressEvent defines a progress event
type ProgressEvent struct {
	ConsumedBytes int64
	TotalBytes    int64
	RwBytes       int64
	EventType     ProgressEventType
}

// ProgressListener listens for progress changes
type ProgressListener interface {
	ProgressChanged(event *ProgressEvent)
}

// -------------------- Private --------------------

func newProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent {
	return &ProgressEvent{
		ConsumedBytes: consumed,
		TotalBytes:    total,
		RwBytes:       rwBytes,
		EventType:     eventType}
}

// publishProgress publishes a progress event to the listener
func publishProgress(listener ProgressListener, event *ProgressEvent) {
	if listener != nil && event != nil {
		listener.ProgressChanged(event)
	}
}

type readerTracker struct {
	completedBytes int64
}

type teeReader struct {
	reader        io.Reader
	writer        io.Writer
	listener      ProgressListener
	consumedBytes int64
	totalBytes    int64
	tracker       *readerTracker
}

// TeeReader returns a Reader that writes to w what it reads from r.
// All reads from r performed through it are matched with
// corresponding writes to w. There is no internal buffering -
// the write must complete before the read completes.
// Any error encountered while writing is reported as a read error.
func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser {
	return &teeReader{
		reader:        reader,
		writer:        writer,
		listener:      listener,
		consumedBytes: 0,
		totalBytes:    totalBytes,
		tracker:       tracker,
	}
}
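
// A minimal sketch, not part of the original file: a ProgressListener that
// logs transfer state. Any call that accepts the Progress(...) option can use
// it. The type name is hypothetical and "fmt" would need to be imported.
type logProgressListener struct{}

func (l *logProgressListener) ProgressChanged(event *ProgressEvent) {
	switch event.EventType {
	case TransferStartedEvent:
		fmt.Printf("started, total %d bytes\n", event.TotalBytes)
	case TransferDataEvent:
		fmt.Printf("transferred %d/%d bytes (+%d)\n", event.ConsumedBytes, event.TotalBytes, event.RwBytes)
	case TransferCompletedEvent:
		fmt.Println("completed")
	case TransferFailedEvent:
		fmt.Println("failed")
	}
}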

func (t *teeReader) Read(p []byte) (n int, err error) {
	n, err = t.reader.Read(p)

	// Read encountered an error
	if err != nil && err != io.EOF {
		event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes, 0)
		publishProgress(t.listener, event)
	}

	if n > 0 {
		t.consumedBytes += int64(n)
		// CRC
		if t.writer != nil {
			if n, err := t.writer.Write(p[:n]); err != nil {
				return n, err
			}
		}
		// Progress
		if t.listener != nil {
			event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes, int64(n))
			publishProgress(t.listener, event)
		}
		// Track
		if t.tracker != nil {
			t.tracker.completedBytes = t.consumedBytes
		}
	}

	return
}

func (t *teeReader) Close() error {
	if rc, ok := t.reader.(io.ReadCloser); ok {
		return rc.Close()
	}
	return nil
}
@ -0,0 +1,11 @@
// +build !go1.7

package oss

import "net/http"

// http.ErrUseLastResponse is only defined from go1.7 onward, so redirects
// cannot be disabled on earlier Go versions and this is a no-op.
func disableHTTPRedirect(client *http.Client) {
}
@ -0,0 +1,12 @@
// +build go1.7

package oss

import "net/http"

// http.ErrUseLastResponse is only defined from go1.7 onward.
func disableHTTPRedirect(client *http.Client) {
	client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		return http.ErrUseLastResponse
	}
}
@ -0,0 +1,197 @@
package oss

import (
	"bytes"
	"encoding/xml"
	"hash/crc32"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
)

// CreateSelectCsvObjectMeta creates the csv object meta.
//
// key       the object key.
// csvMeta   the csv file meta.
// options   the options for creating the csv meta of the object.
//
// MetaEndFrameCSV   the csv file meta info.
// error             it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) CreateSelectCsvObjectMeta(key string, csvMeta CsvMetaRequest, options ...Option) (MetaEndFrameCSV, error) {
	var endFrame MetaEndFrameCSV
	params := map[string]interface{}{}
	params["x-oss-process"] = "csv/meta"

	csvMeta.encodeBase64()
	bs, err := xml.Marshal(csvMeta)
	if err != nil {
		return endFrame, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)

	resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
	if err != nil {
		return endFrame, err
	}
	defer resp.Body.Close()

	_, err = ioutil.ReadAll(resp)

	return resp.Frame.MetaEndFrameCSV, err
}

// CreateSelectJsonObjectMeta creates the json object meta.
//
// key        the object key.
// jsonMeta   the json file meta.
// options    the options for creating the json meta of the object.
//
// MetaEndFrameJSON   the json file meta info.
// error              it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) CreateSelectJsonObjectMeta(key string, jsonMeta JsonMetaRequest, options ...Option) (MetaEndFrameJSON, error) {
	var endFrame MetaEndFrameJSON
	params := map[string]interface{}{}
	params["x-oss-process"] = "json/meta"

	bs, err := xml.Marshal(jsonMeta)
	if err != nil {
		return endFrame, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)

	resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
	if err != nil {
		return endFrame, err
	}
	defer resp.Body.Close()

	_, err = ioutil.ReadAll(resp)

	return resp.Frame.MetaEndFrameJSON, err
}

// SelectObject is the select object api; it supports csv and json files.
//
// key         the object key.
// selectReq   the request data for select object.
// options     the options for selecting the object.
//
// io.ReadCloser   reader instance for reading data from the response. It must be closed after use and is only valid when error is nil.
// error           it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) SelectObject(key string, selectReq SelectRequest, options ...Option) (io.ReadCloser, error) {
	params := map[string]interface{}{}
	if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
		params["x-oss-process"] = "csv/select" // default to selecting a csv file
	} else {
		params["x-oss-process"] = "json/select"
	}
	selectReq.encodeBase64()
	bs, err := xml.Marshal(selectReq)
	if err != nil {
		return nil, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)
	resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
	if err != nil {
		return nil, err
	}
	if selectReq.OutputSerializationSelect.EnablePayloadCrc != nil && *selectReq.OutputSerializationSelect.EnablePayloadCrc {
		resp.Frame.EnablePayloadCrc = true
	}
	resp.Frame.OutputRawData = strings.ToUpper(resp.Headers.Get("x-oss-select-output-raw")) == "TRUE"

	return resp, err
}

// DoPostSelectObject is the SelectObject/CreateMeta api; it supports csv and json files.
//
// key       the object key.
// params    the oss resource: csv/meta, json/meta, csv/select or json/select.
// buf       the request data as a buffer.
// options   the options for selecting the object.
//
// SelectObjectResponse   the response of select object.
// error                  it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) DoPostSelectObject(key string, params map[string]interface{}, buf *bytes.Buffer, options ...Option) (*SelectObjectResponse, error) {
	resp, err := bucket.do("POST", key, params, options, buf, nil)
	if err != nil {
		return nil, err
	}

	result := &SelectObjectResponse{
		Body:       resp.Body,
		StatusCode: resp.StatusCode,
		Frame:      SelectObjectResult{},
	}
	result.Headers = resp.Headers
	result.ReadTimeOut = bucket.GetConfig().Timeout

	// Progress
	listener := GetProgressListener(options)

	// CRC32
	crcCalc := crc32.NewIEEE()
	result.WriterForCheckCrc32 = crcCalc
	result.Body = TeeReader(resp.Body, nil, 0, listener, nil)

	err = CheckRespCode(resp.StatusCode, []int{http.StatusPartialContent, http.StatusOK})

	return result, err
}

// SelectObjectIntoFile is the select-object-to-file api.
//
// key         the object key.
// fileName    the name of the local file to save to.
// selectReq   the request data for select object.
// options     the options for selecting the object.
//
// error   it's nil if no error, otherwise it's an error object.
//
func (bucket Bucket) SelectObjectIntoFile(key, fileName string, selectReq SelectRequest, options ...Option) error {
	tempFilePath := fileName + TempFileSuffix

	params := map[string]interface{}{}
	if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() {
		params["x-oss-process"] = "csv/select" // default to selecting a csv file
	} else {
		params["x-oss-process"] = "json/select"
	}
	selectReq.encodeBase64()
	bs, err := xml.Marshal(selectReq)
	if err != nil {
		return err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)
	resp, err := bucket.DoPostSelectObject(key, params, buffer, options...)
	if err != nil {
		return err
	}
	defer resp.Close()

	// If the local file does not exist, create a new one. If it exists, overwrite it.
	fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
	if err != nil {
		return err
	}

	// Copy the data to the local file path.
	_, err = io.Copy(fd, resp)
	fd.Close()
	if err != nil {
		return err
	}

	return os.Rename(tempFilePath, fileName)
}
@ -0,0 +1,364 @@
package oss

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash"
	"hash/crc32"
	"io"
	"net/http"
	"time"
)

// The adapter for SelectObject's response.
// The response consists of frames. Each frame has the following format:
//
//   Type        | Payload Length | Header Checksum | Payload   | Payload Checksum
//   <-4 bytes--> <---4 bytes----> <----4 bytes----> <---n/a---> <-----4 bytes---->
//
// And there are three kinds of frames:
//
// Data Frame:
//   Type: 8388609
//   Payload: Offset | Data
//            <-8 bytes->
//
// Continuous Frame:
//   Type: 8388612
//   Payload: Offset (8 bytes)
//
// End Frame:
//   Type: 8388613
//   Payload: Offset     | total scanned bytes | http status code | error message
//            <-8 bytes-> <------8 bytes------> <----4 bytes-----> <---variable-->

// SelectObjectResponse defines the HTTP response from OSS SelectObject
type SelectObjectResponse struct {
	StatusCode          int
	Headers             http.Header
	Body                io.ReadCloser
	Frame               SelectObjectResult
	ReadTimeOut         uint
	ClientCRC32         uint32
	ServerCRC32         uint32
	WriterForCheckCrc32 hash.Hash32
	Finish              bool
}
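
// decodeFrameHeader is a sketch, not part of the original file: it shows how
// the fixed 20-byte frame header described above decomposes, mirroring
// analysisHeader below. Byte 0 carries the version, bytes 1-3 the big-endian
// frame type, then the payload length, the header checksum, and the 8-byte
// payload offset. The function name is hypothetical.
func decodeFrameHeader(head [20]byte) (version byte, frameType int32, payloadLength int32, offset uint64) {
	version = head[0]
	typeBytes := []byte{0, head[1], head[2], head[3]} // zero the version byte before decoding the type
	frameType = int32(binary.BigEndian.Uint32(typeBytes))
	payloadLength = int32(binary.BigEndian.Uint32(head[4:8]))
	// head[8:12] holds the header checksum; it is read but not verified, as in analysisHeader.
	offset = binary.BigEndian.Uint64(head[12:20])
	return
}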

func (sr *SelectObjectResponse) Read(p []byte) (n int, err error) {
	n, err = sr.readFrames(p)
	return
}

// Close closes the HTTP response body
func (sr *SelectObjectResponse) Close() error {
	return sr.Body.Close()
}

// PostSelectResult is the result of SelectObject
type PostSelectResult struct {
	Response *SelectObjectResponse
}

// readFrames reads the response frames
func (sr *SelectObjectResponse) readFrames(p []byte) (int, error) {
	var nn int
	var err error
	var checkValid bool
	if sr.Frame.OutputRawData {
		nn, err = sr.Body.Read(p)
		return nn, err
	}

	if sr.Finish {
		return 0, io.EOF
	}

	for {
		// If this frame's header has already been read, do not read it again
		if !sr.Frame.OpenLine {
			err = sr.analysisHeader()
			if err != nil {
				return nn, err
			}
		}

		if sr.Frame.FrameType == DataFrameType {
			n, err := sr.analysisData(p[nn:])
			if err != nil {
				return nn, err
			}
			nn += n

			// If all of this frame's data has been read, reset the frame state so the next frame can be read
			if sr.Frame.ConsumedBytesLength == sr.Frame.PayloadLength-8 {
				checkValid, err = sr.checkPayloadSum()
				if err != nil || !checkValid {
					return nn, fmt.Errorf("%s", err.Error())
				}
				sr.emptyFrame()
			}

			if nn == len(p) {
				return nn, nil
			}
		} else if sr.Frame.FrameType == ContinuousFrameType {
			checkValid, err = sr.checkPayloadSum()
			if err != nil || !checkValid {
				return nn, fmt.Errorf("%s", err.Error())
			}
		} else if sr.Frame.FrameType == EndFrameType {
			err = sr.analysisEndFrame()
			if err != nil {
				return nn, err
			}
			checkValid, err = sr.checkPayloadSum()
			if checkValid {
				sr.Finish = true
			}
			return nn, err
		} else if sr.Frame.FrameType == MetaEndFrameCSVType {
			err = sr.analysisMetaEndFrameCSV()
			if err != nil {
				return nn, err
			}
			checkValid, err = sr.checkPayloadSum()
			if checkValid {
				sr.Finish = true
			}
			return nn, err
		} else if sr.Frame.FrameType == MetaEndFrameJSONType {
			err = sr.analysisMetaEndFrameJSON()
			if err != nil {
				return nn, err
			}
			checkValid, err = sr.checkPayloadSum()
			if checkValid {
				sr.Finish = true
			}
			return nn, err
		}
	}
}

type chanReadIO struct {
	readLen int
	err     error
}

// readLen reads exactly len(p) bytes from the body in a goroutine so the read
// can be abandoned when the configured timeout elapses.
func (sr *SelectObjectResponse) readLen(p []byte, timeOut time.Duration) (int, error) {
	r := sr.Body
	ch := make(chan chanReadIO, 1)
	defer close(ch)
	go func(p []byte) {
		var needReadLength int
		readChan := chanReadIO{}
		needReadLength = len(p)
		for {
			n, err := r.Read(p[readChan.readLen:needReadLength])
			readChan.readLen += n
			if err != nil {
				readChan.err = err
				ch <- readChan
				return
			}

			if readChan.readLen == needReadLength {
				break
			}
		}
		ch <- readChan
	}(p)

	select {
	case <-time.After(time.Second * timeOut):
		return 0, fmt.Errorf("requestId: %s, readLen timeout, timeout is %d(second), need read: %d", sr.Headers.Get(HTTPHeaderOssRequestID), timeOut, len(p))
	case result := <-ch:
		return result.readLen, result.err
	}
}

// analysisHeader reads a frame header from the SelectObject response body
func (sr *SelectObjectResponse) analysisHeader() error {
	headFrameByte := make([]byte, 20)
	_, err := sr.readLen(headFrameByte, time.Duration(sr.ReadTimeOut))
	if err != nil {
		return fmt.Errorf("requestId: %s, Read response frame header failure, err: %s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
	}

	frameTypeByte := headFrameByte[0:4]
	sr.Frame.Version = frameTypeByte[0]
	frameTypeByte[0] = 0
	bytesToInt(frameTypeByte, &sr.Frame.FrameType)

	if sr.Frame.FrameType != DataFrameType && sr.Frame.FrameType != ContinuousFrameType &&
		sr.Frame.FrameType != EndFrameType && sr.Frame.FrameType != MetaEndFrameCSVType && sr.Frame.FrameType != MetaEndFrameJSONType {
		return fmt.Errorf("requestId: %s, Unexpected frame type: %d", sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType)
	}

	payloadLengthByte := headFrameByte[4:8]
	bytesToInt(payloadLengthByte, &sr.Frame.PayloadLength)
	headCheckSumByte := headFrameByte[8:12]
	bytesToInt(headCheckSumByte, &sr.Frame.HeaderCheckSum)
	byteOffset := headFrameByte[12:20]
	bytesToInt(byteOffset, &sr.Frame.Offset)
	sr.Frame.OpenLine = true

	err = sr.writerCheckCrc32(byteOffset)
	return err
}

// analysisData reads the DataFrameType payload of the SelectObject response body
func (sr *SelectObjectResponse) analysisData(p []byte) (int, error) {
	var needReadLength int32
	lenP := int32(len(p))
	restByteLength := sr.Frame.PayloadLength - 8 - sr.Frame.ConsumedBytesLength
	if lenP <= restByteLength {
		needReadLength = lenP
	} else {
		needReadLength = restByteLength
	}
	n, err := sr.readLen(p[:needReadLength], time.Duration(sr.ReadTimeOut))
	if err != nil {
		return n, fmt.Errorf("read frame data error, %s", err.Error())
	}
	sr.Frame.ConsumedBytesLength += int32(n)
	err = sr.writerCheckCrc32(p[:n])
	return n, err
}

// analysisEndFrame reads the EndFrameType payload of the SelectObject response body
func (sr *SelectObjectResponse) analysisEndFrame() error {
	var eF EndFrame
	payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
	_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
	if err != nil {
		return fmt.Errorf("read end frame error: %s", err.Error())
	}
	bytesToInt(payLoadBytes[0:8], &eF.TotalScanned)
	bytesToInt(payLoadBytes[8:12], &eF.HTTPStatusCode)
	errMsgLength := sr.Frame.PayloadLength - 20
	eF.ErrorMsg = string(payLoadBytes[12 : errMsgLength+12])
	sr.Frame.EndFrame.TotalScanned = eF.TotalScanned
	sr.Frame.EndFrame.HTTPStatusCode = eF.HTTPStatusCode
	sr.Frame.EndFrame.ErrorMsg = eF.ErrorMsg
	err = sr.writerCheckCrc32(payLoadBytes)
	return err
}

// analysisMetaEndFrameCSV reads the MetaEndFrameCSVType payload of the SelectObject response body
func (sr *SelectObjectResponse) analysisMetaEndFrameCSV() error {
	var mCF MetaEndFrameCSV
	payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
	_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
	if err != nil {
		return fmt.Errorf("read meta end csv frame error: %s", err.Error())
	}

	bytesToInt(payLoadBytes[0:8], &mCF.TotalScanned)
	bytesToInt(payLoadBytes[8:12], &mCF.Status)
	bytesToInt(payLoadBytes[12:16], &mCF.SplitsCount)
	bytesToInt(payLoadBytes[16:24], &mCF.RowsCount)
	bytesToInt(payLoadBytes[24:28], &mCF.ColumnsCount)
	errMsgLength := sr.Frame.PayloadLength - 36
	mCF.ErrorMsg = string(payLoadBytes[28 : errMsgLength+28])
	sr.Frame.MetaEndFrameCSV.ErrorMsg = mCF.ErrorMsg
	sr.Frame.MetaEndFrameCSV.TotalScanned = mCF.TotalScanned
	sr.Frame.MetaEndFrameCSV.Status = mCF.Status
	sr.Frame.MetaEndFrameCSV.SplitsCount = mCF.SplitsCount
	sr.Frame.MetaEndFrameCSV.RowsCount = mCF.RowsCount
	sr.Frame.MetaEndFrameCSV.ColumnsCount = mCF.ColumnsCount
	err = sr.writerCheckCrc32(payLoadBytes)
	return err
}

// analysisMetaEndFrameJSON reads the MetaEndFrameJSONType payload of the SelectObject response body
func (sr *SelectObjectResponse) analysisMetaEndFrameJSON() error {
	var mJF MetaEndFrameJSON
	payLoadBytes := make([]byte, sr.Frame.PayloadLength-8)
	_, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut))
	if err != nil {
		return fmt.Errorf("read meta end json frame error: %s", err.Error())
	}

	bytesToInt(payLoadBytes[0:8], &mJF.TotalScanned)
	bytesToInt(payLoadBytes[8:12], &mJF.Status)
	bytesToInt(payLoadBytes[12:16], &mJF.SplitsCount)
	bytesToInt(payLoadBytes[16:24], &mJF.RowsCount)
	errMsgLength := sr.Frame.PayloadLength - 32
	mJF.ErrorMsg = string(payLoadBytes[24 : errMsgLength+24])
	sr.Frame.MetaEndFrameJSON.ErrorMsg = mJF.ErrorMsg
	sr.Frame.MetaEndFrameJSON.TotalScanned = mJF.TotalScanned
	sr.Frame.MetaEndFrameJSON.Status = mJF.Status
	sr.Frame.MetaEndFrameJSON.SplitsCount = mJF.SplitsCount
	sr.Frame.MetaEndFrameJSON.RowsCount = mJF.RowsCount

	err = sr.writerCheckCrc32(payLoadBytes)
	return err
}

func (sr *SelectObjectResponse) checkPayloadSum() (bool, error) {
	payLoadChecksumByte := make([]byte, 4)
	n, err := sr.readLen(payLoadChecksumByte, time.Duration(sr.ReadTimeOut))
	if n == 4 {
		bytesToInt(payLoadChecksumByte, &sr.Frame.PayloadChecksum)
		sr.ServerCRC32 = sr.Frame.PayloadChecksum
		sr.ClientCRC32 = sr.WriterForCheckCrc32.Sum32()
		if sr.Frame.EnablePayloadCrc && sr.ServerCRC32 != 0 && sr.ServerCRC32 != sr.ClientCRC32 {
			return false, fmt.Errorf("RequestId: %s, Unexpected frame type: %d, client %d but server %d",
				sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType, sr.ClientCRC32, sr.ServerCRC32)
		}
		return true, err
	}
	return false, fmt.Errorf("RequestId: %s, read checksum error: %s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error())
}

func (sr *SelectObjectResponse) writerCheckCrc32(p []byte) (err error) {
	err = nil
	if sr.Frame.EnablePayloadCrc {
		_, err = sr.WriterForCheckCrc32.Write(p)
	}
	return err
}

// emptyFrame resets the SelectObjectResponse frame state
func (sr *SelectObjectResponse) emptyFrame() {
	crcCalc := crc32.NewIEEE()
	sr.WriterForCheckCrc32 = crcCalc
	sr.Finish = false

	sr.Frame.ConsumedBytesLength = 0
	sr.Frame.OpenLine = false
	sr.Frame.Version = byte(0)
	sr.Frame.FrameType = 0
	sr.Frame.PayloadLength = 0
	sr.Frame.HeaderCheckSum = 0
	sr.Frame.Offset = 0
	sr.Frame.Data = ""

	sr.Frame.EndFrame.TotalScanned = 0
	sr.Frame.EndFrame.HTTPStatusCode = 0
	sr.Frame.EndFrame.ErrorMsg = ""

	sr.Frame.MetaEndFrameCSV.TotalScanned = 0
	sr.Frame.MetaEndFrameCSV.Status = 0
	sr.Frame.MetaEndFrameCSV.SplitsCount = 0
	sr.Frame.MetaEndFrameCSV.RowsCount = 0
	sr.Frame.MetaEndFrameCSV.ColumnsCount = 0
	sr.Frame.MetaEndFrameCSV.ErrorMsg = ""

	sr.Frame.MetaEndFrameJSON.TotalScanned = 0
	sr.Frame.MetaEndFrameJSON.Status = 0
	sr.Frame.MetaEndFrameJSON.SplitsCount = 0
	sr.Frame.MetaEndFrameJSON.RowsCount = 0
	sr.Frame.MetaEndFrameJSON.ErrorMsg = ""

	sr.Frame.PayloadChecksum = 0
}

// bytesToInt decodes a big-endian byte slice into the given integer
func bytesToInt(b []byte, ret interface{}) {
	binBuf := bytes.NewBuffer(b)
	binary.Read(binBuf, binary.BigEndian, ret)
}
@ -0,0 +1,41 @@
// +build !go1.7

package oss

import (
	"crypto/tls"
	"net"
	"net/http"
	"time"
)

func newTransport(conn *Conn, config *Config) *http.Transport {
	httpTimeOut := conn.config.HTTPTimeout
	httpMaxConns := conn.config.HTTPMaxConns
	// New Transport
	transport := &http.Transport{
		Dial: func(netw, addr string) (net.Conn, error) {
			d := net.Dialer{
				Timeout:   httpTimeOut.ConnectTimeout,
				KeepAlive: 30 * time.Second,
			}
			if config.LocalAddr != nil {
				d.LocalAddr = config.LocalAddr
			}
			conn, err := d.Dial(netw, addr)
			if err != nil {
				return nil, err
			}
			return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
		},
		MaxIdleConnsPerHost:   httpMaxConns.MaxIdleConnsPerHost,
		ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
	}

	if config.InsecureSkipVerify {
		transport.TLSClientConfig = &tls.Config{
			InsecureSkipVerify: true,
		}
	}
	return transport
}
@ -0,0 +1,43 @@
// +build go1.7

package oss

import (
	"crypto/tls"
	"net"
	"net/http"
	"time"
)

func newTransport(conn *Conn, config *Config) *http.Transport {
	httpTimeOut := conn.config.HTTPTimeout
	httpMaxConns := conn.config.HTTPMaxConns
	// New Transport
	transport := &http.Transport{
		Dial: func(netw, addr string) (net.Conn, error) {
			d := net.Dialer{
				Timeout:   httpTimeOut.ConnectTimeout,
				KeepAlive: 30 * time.Second,
			}
			if config.LocalAddr != nil {
				d.LocalAddr = config.LocalAddr
			}
			conn, err := d.Dial(netw, addr)
			if err != nil {
				return nil, err
			}
			return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
		},
		MaxIdleConns:          httpMaxConns.MaxIdleConns,
		MaxIdleConnsPerHost:   httpMaxConns.MaxIdleConnsPerHost,
		IdleConnTimeout:       httpTimeOut.IdleConnTimeout,
		ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
	}

	if config.InsecureSkipVerify {
		transport.TLSClientConfig = &tls.Config{
			InsecureSkipVerify: true,
		}
	}
	return transport
}
File diff suppressed because it is too large
@ -0,0 +1,552 @@
package oss

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"path/filepath"
	"time"
)

// UploadFile is multipart file upload.
//
// objectKey   the object name.
// filePath    the local file path to upload.
// partSize    the part size in bytes.
// options     the options for uploading the object.
//
// error   it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
	if partSize < MinPartSize || partSize > MaxPartSize {
		return errors.New("oss: part size invalid range (100KB, 5GB]")
	}

	cpConf := getCpConfig(options)
	routines := getRoutines(options)

	if cpConf != nil && cpConf.IsEnable {
		cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey)
		if cpFilePath != "" {
			return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines)
		}
	}

	return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
}
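
// A sketch, not part of the original file: how the checkpoint and routine
// options defined in this package combine with UploadFile. The endpoint,
// credentials, bucket and paths are placeholders, and the function name is
// hypothetical.
func exampleUploadFile() error {
	client, err := New("<endpoint>", "<access_key_id>", "<secret_access_key>")
	if err != nil {
		return err
	}
	bucket, err := client.Bucket("<bucket>")
	if err != nil {
		return err
	}
	// 5 MB parts, three upload goroutines, resumable via a checkpoint file under /tmp/cp.
	return bucket.UploadFile("my-object", "/tmp/local.file", 5*1024*1024,
		Routines(3),
		CheckpointDir(true, "/tmp/cp"))
}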

func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string {
	if cpConf.FilePath == "" && cpConf.DirPath != "" {
		dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
		absPath, _ := filepath.Abs(srcFile)
		cpFileName := getCpFileName(absPath, dest, "")
		cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
	}
	return cpConf.FilePath
}

// ----- concurrent upload without checkpoint -----

// getCpConfig gets the checkpoint configuration
func getCpConfig(options []Option) *cpConfig {
	cpcOpt, err := FindOption(options, checkpointConfig, nil)
	if err != nil || cpcOpt == nil {
		return nil
	}

	return cpcOpt.(*cpConfig)
}

// getCpFileName returns the name of the checkpoint file
func getCpFileName(src, dest, versionId string) string {
	md5Ctx := md5.New()
	md5Ctx.Write([]byte(src))
	srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))

	md5Ctx.Reset()
	md5Ctx.Write([]byte(dest))
	destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))

	if versionId == "" {
		return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
	}

	md5Ctx.Reset()
	md5Ctx.Write([]byte(versionId))
	versionCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
	return fmt.Sprintf("%v-%v-%v.cp", srcCheckSum, destCheckSum, versionCheckSum)
}

// getRoutines gets the routine count; by default it's 1.
func getRoutines(options []Option) int {
	rtnOpt, err := FindOption(options, routineNum, nil)
	if err != nil || rtnOpt == nil {
		return 1
	}

	rs := rtnOpt.(int)
	if rs < 1 {
		rs = 1
	} else if rs > 100 {
		rs = 100
	}

	return rs
}

// getPayer returns the payer of the request
func getPayer(options []Option) string {
	payerOpt, err := FindOption(options, HTTPHeaderOssRequester, nil)
	if err != nil || payerOpt == nil {
		return ""
	}
	return payerOpt.(string)
}

// GetProgressListener gets the progress callback
func GetProgressListener(options []Option) ProgressListener {
	isSet, listener, _ := IsOptionSet(options, progressListener)
	if !isSet {
		return nil
	}
	return listener.(ProgressListener)
}

// uploadPartHook is for testing usage
type uploadPartHook func(id int, chunk FileChunk) error

var uploadPartHooker uploadPartHook = defaultUploadPart

func defaultUploadPart(id int, chunk FileChunk) error {
	return nil
}

// workerArg defines the worker argument structure
type workerArg struct {
	bucket   *Bucket
	filePath string
	imur     InitiateMultipartUploadResult
	options  []Option
	hook     uploadPartHook
}

// defaultUploadProgressListener is a no-op listener used by the part workers
type defaultUploadProgressListener struct {
}

// ProgressChanged is a no-op
func (listener *defaultUploadProgressListener) ProgressChanged(event *ProgressEvent) {
}

// worker is the worker goroutine function
func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
	for chunk := range jobs {
		if err := arg.hook(id, chunk); err != nil {
			failed <- err
			break
		}
		var respHeader http.Header
		p := Progress(&defaultUploadProgressListener{})
		opts := make([]Option, 0, len(arg.options)+2)
		opts = append(opts, arg.options...)

		// use defaultUploadProgressListener
		opts = append(opts, p, GetResponseHeader(&respHeader))

		startT := time.Now().UnixNano() / 1000 / 1000 / 1000
		part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, opts...)
		endT := time.Now().UnixNano() / 1000 / 1000 / 1000
		if err != nil {
			arg.bucket.Client.Config.WriteLog(Debug, "upload part error, cost:%d second, part number:%d, request id:%s, error:%s\n", endT-startT, chunk.Number, GetRequestId(respHeader), err.Error())
			failed <- err
			break
		}
		select {
		case <-die:
			return
		default:
		}
		results <- part
	}
}

// scheduler feeds the chunks into the jobs channel
func scheduler(jobs chan FileChunk, chunks []FileChunk) {
	for _, chunk := range chunks {
		jobs <- chunk
	}
	close(jobs)
}

func getTotalBytes(chunks []FileChunk) int64 {
	var tb int64
	for _, chunk := range chunks {
		tb += chunk.Size
	}
	return tb
}

// uploadFile is a concurrent upload, without checkpoint
func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
	listener := GetProgressListener(options)

	chunks, err := SplitFileByPartSize(filePath, partSize)
	if err != nil {
		return err
	}

	partOptions := ChoiceTransferPartOption(options)
	completeOptions := ChoiceCompletePartOption(options)
	abortOptions := ChoiceAbortPartOption(options)

	// Initialize the multipart upload
	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
	if err != nil {
		return err
	}

	jobs := make(chan FileChunk, len(chunks))
	results := make(chan UploadPart, len(chunks))
	failed := make(chan error)
	die := make(chan bool)

	var completedBytes int64
	totalBytes := getTotalBytes(chunks)
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
	publishProgress(listener, event)

	// Start the worker goroutines
	arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
	for w := 1; w <= routines; w++ {
		go worker(w, arg, jobs, results, failed, die)
	}

	// Schedule the jobs
	go scheduler(jobs, chunks)

	// Wait for the uploads to finish
	completed := 0
	parts := make([]UploadPart, len(chunks))
	for completed < len(chunks) {
		select {
		case part := <-results:
			completed++
			parts[part.PartNumber-1] = part
			completedBytes += chunks[part.PartNumber-1].Size

			// RwBytes in ProgressEvent is 0 here because the read/write
			// events have already been reported by teeReader.Read()
			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, chunks[part.PartNumber-1].Size)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
			publishProgress(listener, event)
			bucket.AbortMultipartUpload(imur, abortOptions...)
			return err
		}

		if completed >= len(chunks) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
	publishProgress(listener, event)

	// Complete the multipart upload
	_, err = bucket.CompleteMultipartUpload(imur, parts, completeOptions...)
	if err != nil {
		bucket.AbortMultipartUpload(imur, abortOptions...)
		return err
	}
	return nil
}

// ----- concurrent upload with checkpoint -----
const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"

type uploadCheckpoint struct {
	Magic     string   // Magic
	MD5       string   // Checkpoint file content's MD5
	FilePath  string   // Local file path
	FileStat  cpStat   // File state
	ObjectKey string   // Key
	UploadID  string   // Upload ID
	Parts     []cpPart // All parts of the local file
}

type cpStat struct {
	Size         int64     // File size
	LastModified time.Time // File's last modified time
	MD5          string    // Local file's MD5
}

type cpPart struct {
	Chunk       FileChunk  // File chunk
	Part        UploadPart // Uploaded part
	IsCompleted bool       // Upload complete flag
}

// isValid checks if the uploaded data is valid---it's valid when the file is not updated and the checkpoint data is valid.
func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
	// Compare the CP's magic number and MD5.
	cpb := cp
	cpb.MD5 = ""
	js, _ := json.Marshal(cpb)
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])

	if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
		return false, nil
	}

	// Make sure the local file has not been updated.
	fd, err := os.Open(filePath)
	if err != nil {
		return false, err
	}
	defer fd.Close()

	st, err := fd.Stat()
	if err != nil {
		return false, err
	}

	md, err := calcFileMD5(filePath)
	if err != nil {
		return false, err
	}

	// Compare the file size, the file's last modified time and the file's MD5
	if cp.FileStat.Size != st.Size() ||
		!cp.FileStat.LastModified.Equal(st.ModTime()) ||
		cp.FileStat.MD5 != md {
		return false, nil
	}

	return true, nil
}

// load loads the checkpoint from the file
func (cp *uploadCheckpoint) load(filePath string) error {
	contents, err := ioutil.ReadFile(filePath)
	if err != nil {
		return err
	}

	err = json.Unmarshal(contents, cp)
	return err
}

// dump dumps the checkpoint to the local file
func (cp *uploadCheckpoint) dump(filePath string) error {
	bcp := *cp

	// Calculate MD5
	bcp.MD5 = ""
	js, err := json.Marshal(bcp)
	if err != nil {
		return err
	}
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])
	bcp.MD5 = b64

	// Serialization
	js, err = json.Marshal(bcp)
	if err != nil {
		return err
	}

	// Dump
	return ioutil.WriteFile(filePath, js, FilePermMode)
}

// updatePart updates the part status
func (cp *uploadCheckpoint) updatePart(part UploadPart) {
	cp.Parts[part.PartNumber-1].Part = part
	cp.Parts[part.PartNumber-1].IsCompleted = true
}

// todoParts returns unfinished parts
func (cp *uploadCheckpoint) todoParts() []FileChunk {
	fcs := []FileChunk{}
	for _, part := range cp.Parts {
		if !part.IsCompleted {
			fcs = append(fcs, part.Chunk)
		}
	}
	return fcs
}

// allParts returns all parts
func (cp *uploadCheckpoint) allParts() []UploadPart {
	ps := []UploadPart{}
	for _, part := range cp.Parts {
		ps = append(ps, part.Part)
	}
	return ps
}

// getCompletedBytes returns the completed bytes count
func (cp *uploadCheckpoint) getCompletedBytes() int64 {
	var completedBytes int64
	for _, part := range cp.Parts {
		if part.IsCompleted {
			completedBytes += part.Chunk.Size
		}
	}
	return completedBytes
}

// calcFileMD5 calculates the MD5 of the specified local file.
// It currently returns an empty string, so the MD5 comparison in isValid is a no-op.
func calcFileMD5(filePath string) (string, error) {
	return "", nil
}

// prepare initializes the multipart upload
func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
	// CP
	cp.Magic = uploadCpMagic
	cp.FilePath = filePath
	cp.ObjectKey = objectKey

	// Local file
	fd, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer fd.Close()

	st, err := fd.Stat()
	if err != nil {
		return err
	}
	cp.FileStat.Size = st.Size()
	cp.FileStat.LastModified = st.ModTime()
	md, err := calcFileMD5(filePath)
	if err != nil {
		return err
	}
	cp.FileStat.MD5 = md

	// Chunks
	parts, err := SplitFileByPartSize(filePath, partSize)
	if err != nil {
		return err
	}

	cp.Parts = make([]cpPart, len(parts))
	for i, part := range parts {
		cp.Parts[i].Chunk = part
		cp.Parts[i].IsCompleted = false
	}

	// Initiate the upload
	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
	if err != nil {
		return err
	}
	cp.UploadID = imur.UploadID

	return nil
}

// complete completes the multipart upload and deletes the local CP file
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
	imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
		Key: cp.ObjectKey, UploadID: cp.UploadID}
	_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
	if err != nil {
		return err
	}
	os.Remove(cpFilePath)
	return err
}

// uploadFileWithCp handles concurrent upload with checkpoint
func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
	listener := GetProgressListener(options)

	partOptions := ChoiceTransferPartOption(options)
	completeOptions := ChoiceCompletePartOption(options)

	// Load CP data
	ucp := uploadCheckpoint{}
	err := ucp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}

	// Load error or the CP data is invalid.
	valid, err := ucp.isValid(filePath)
	if err != nil || !valid {
		if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}

	chunks := ucp.todoParts()
	imur := InitiateMultipartUploadResult{
		Bucket:   bucket.BucketName,
		Key:      objectKey,
		UploadID: ucp.UploadID}

	jobs := make(chan FileChunk, len(chunks))
	results := make(chan UploadPart, len(chunks))
	failed := make(chan error)
	die := make(chan bool)

	completedBytes := ucp.getCompletedBytes()

	// RwBytes in ProgressEvent is 0 here because the read/write events have
	// already been reported by teeReader.Read()
	event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size, 0)
	publishProgress(listener, event)

	// Start the workers
	arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker}
	for w := 1; w <= routines; w++ {
		go worker(w, arg, jobs, results, failed, die)
	}

	// Schedule the jobs
	go scheduler(jobs, chunks)

	// Wait for the jobs to finish
	completed := 0
	for completed < len(chunks) {
		select {
		case part := <-results:
			completed++
			ucp.updatePart(part)
			ucp.dump(cpFilePath)
			completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
			event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size, ucp.Parts[part.PartNumber-1].Chunk.Size)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size, 0)
			publishProgress(listener, event)
			return err
		}

		if completed >= len(chunks) {
			break
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size, 0)
	publishProgress(listener, event)

	// Complete the multipart upload
	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, completeOptions)
	return err
}
@ -0,0 +1,522 @@
package oss

import (
	"bytes"
	"errors"
	"fmt"
	"hash/crc32"
	"hash/crc64"
	"io"
	"net/http"
	"os"
	"os/exec"
	"runtime"
	"strconv"
	"strings"
	"time"
)

var sys_name string
var sys_release string
var sys_machine string

func init() {
	sys_name = runtime.GOOS
	sys_release = "-"
	sys_machine = runtime.GOARCH

	if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil {
		sys_name = string(bytes.TrimSpace(out))
	}
	if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil {
		sys_release = string(bytes.TrimSpace(out))
	}
	if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil {
		sys_machine = string(bytes.TrimSpace(out))
	}
}

// userAgent gets the user agent.
// It contains the SDK version, OS information and Go version.
func userAgent() string {
	sys := getSysInfo()
	return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
		sys.release, sys.machine, runtime.Version())
}

type sysInfo struct {
	name    string // OS name such as windows/Linux
	release string // OS version such as 2.6.32-220.23.2.ali1089.el5.x86_64
	machine string // CPU type such as amd64/x86_64
}

// getSysInfo gets the system info:
// the OS information and the CPU type.
func getSysInfo() sysInfo {
	return sysInfo{name: sys_name, release: sys_release, machine: sys_machine}
}

// GetRangeConfig gets the download range from the options.
func GetRangeConfig(options []Option) (*UnpackedRange, error) {
	rangeOpt, err := FindOption(options, HTTPHeaderRange, nil)
	if err != nil || rangeOpt == nil {
		return nil, err
	}
	return ParseRange(rangeOpt.(string))
}

// UnpackedRange is an HTTP byte range unpacked into its parts
type UnpackedRange struct {
	HasStart bool  // Flag indicating whether the start point is specified
	HasEnd   bool  // Flag indicating whether the end point is specified
	Start    int64 // Start point
	End      int64 // End point
}

// InvalidRangeError returns an invalid range error
func InvalidRangeError(r string) error {
	return fmt.Errorf("InvalidRange %s", r)
}

// GetRangeString formats an UnpackedRange back into M-N form
func GetRangeString(unpackRange UnpackedRange) string {
	var strRange string
	if unpackRange.HasStart && unpackRange.HasEnd {
		strRange = fmt.Sprintf("%d-%d", unpackRange.Start, unpackRange.End)
	} else if unpackRange.HasStart {
		strRange = fmt.Sprintf("%d-", unpackRange.Start)
	} else if unpackRange.HasEnd {
		strRange = fmt.Sprintf("-%d", unpackRange.End)
	}
	return strRange
}

// ParseRange parses the various styles of range such as bytes=M-N
func ParseRange(normalizedRange string) (*UnpackedRange, error) {
	var err error
	hasStart := false
	hasEnd := false
	var start int64
	var end int64

	// bytes=M-N or ranges=M-N
	nrSlice := strings.Split(normalizedRange, "=")
	if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
		return nil, InvalidRangeError(normalizedRange)
	}

	// bytes=M-N,X-Y
	rSlice := strings.Split(nrSlice[1], ",")
	rStr := rSlice[0]

	if strings.HasSuffix(rStr, "-") { // M-
		startStr := rStr[:len(rStr)-1]
		start, err = strconv.ParseInt(startStr, 10, 64)
		if err != nil {
			return nil, InvalidRangeError(normalizedRange)
		}
		hasStart = true
	} else if strings.HasPrefix(rStr, "-") { // -N
		lenStr := rStr[1:]
		end, err = strconv.ParseInt(lenStr, 10, 64)
		if err != nil {
			return nil, InvalidRangeError(normalizedRange)
		}
		if end == 0 { // -0
			return nil, InvalidRangeError(normalizedRange)
		}
		hasEnd = true
	} else { // M-N
		valSlice := strings.Split(rStr, "-")
		if len(valSlice) != 2 {
			return nil, InvalidRangeError(normalizedRange)
		}
		start, err = strconv.ParseInt(valSlice[0], 10, 64)
		if err != nil {
			return nil, InvalidRangeError(normalizedRange)
		}
		hasStart = true
		end, err = strconv.ParseInt(valSlice[1], 10, 64)
		if err != nil {
			return nil, InvalidRangeError(normalizedRange)
		}
		hasEnd = true
	}

	return &UnpackedRange{hasStart, hasEnd, start, end}, nil
}
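
// exampleParseRange is a sketch, not part of the original file: it shows how
// ParseRange and AdjustRange (below) interact for a 100-byte object.
// "bytes=-10" means "the last 10 bytes", which AdjustRange turns into the
// half-open window [90, 100). The function name is hypothetical.
func exampleParseRange() error {
	ur, err := ParseRange("bytes=-10")
	if err != nil {
		return err
	}
	start, end := AdjustRange(ur, 100)
	fmt.Printf("read [%d, %d)\n", start, end) // read [90, 100)
	return nil
}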

// AdjustRange clamps the requested range to the length of the file and
// returns the adjusted start and end.
func AdjustRange(ur *UnpackedRange, size int64) (start, end int64) {
	if ur == nil {
		return 0, size
	}

	if ur.HasStart && ur.HasEnd {
		start = ur.Start
		end = ur.End + 1
		if ur.Start < 0 || ur.Start >= size || ur.End > size || ur.Start > ur.End {
			start = 0
			end = size
		}
	} else if ur.HasStart {
		start = ur.Start
		end = size
		if ur.Start < 0 || ur.Start >= size {
			start = 0
		}
	} else if ur.HasEnd {
		start = size - ur.End
		end = size
		if ur.End < 0 || ur.End > size {
			start = 0
			end = size
		}
	}
	return
}

// GetNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC.
// It gets the current time in Unix time, in seconds.
func GetNowSec() int64 {
	return time.Now().Unix()
}

// GetNowNanoSec returns t as a Unix time, the number of nanoseconds elapsed
// since January 1, 1970 UTC. The result is undefined if the Unix time
// in nanoseconds cannot be represented by an int64. Note that this
// means the result of calling UnixNano on the zero Time is undefined.
// It gets the current time in Unix time, in nanoseconds.
func GetNowNanoSec() int64 {
	return time.Now().UnixNano()
}

// GetNowGMT gets the current time in GMT format.
func GetNowGMT() string {
	return time.Now().UTC().Format(http.TimeFormat)
}

// FileChunk is the file chunk definition
type FileChunk struct {
	Number int   // Chunk number
	Offset int64 // Chunk offset
	Size   int64 // Chunk size.
}

// SplitFileByPartNum splits a big file into parts by the number of parts.
// It splits the file with the specified parts count and returns the split result when error is nil.
func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
	if chunkNum <= 0 || chunkNum > 10000 {
		return nil, errors.New("chunkNum invalid")
	}

	file, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	stat, err := file.Stat()
	if err != nil {
		return nil, err
	}

	if int64(chunkNum) > stat.Size() {
		return nil, errors.New("oss: chunkNum invalid")
	}

	var chunks []FileChunk
	var chunk = FileChunk{}
	var chunkN = (int64)(chunkNum)
	for i := int64(0); i < chunkN; i++ {
		chunk.Number = int(i + 1)
		chunk.Offset = i * (stat.Size() / chunkN)
		if i == chunkN-1 {
			chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN
		} else {
			chunk.Size = stat.Size() / chunkN
		}
		chunks = append(chunks, chunk)
	}

	return chunks, nil
}

// SplitFileByPartSize splits a big file into parts by the size of parts.
// It splits the file by the part size and returns the FileChunk slice when error is nil.
func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
	if chunkSize <= 0 {
		return nil, errors.New("chunkSize invalid")
	}

	file, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	stat, err := file.Stat()
	if err != nil {
		return nil, err
	}
	var chunkN = stat.Size() / chunkSize
	if chunkN >= 10000 {
		return nil, errors.New("Too many parts, please increase part size")
	}

	var chunks []FileChunk
	var chunk = FileChunk{}
	for i := int64(0); i < chunkN; i++ {
		chunk.Number = int(i + 1)
		chunk.Offset = i * chunkSize
		chunk.Size = chunkSize
		chunks = append(chunks, chunk)
	}

	if stat.Size()%chunkSize > 0 {
		chunk.Number = len(chunks) + 1
		chunk.Offset = int64(len(chunks)) * chunkSize
		chunk.Size = stat.Size() % chunkSize
		chunks = append(chunks, chunk)
	}

	return chunks, nil
}
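
// A worked example, not part of the original file, of the chunk arithmetic in
// SplitFileByPartSize: a file of 10 MB + 1 byte split with 4 MB parts yields
// three chunks at offsets 0, 4 MB and 8 MB:
//
//	size := int64(10*1024*1024 + 1)
//	chunkSize := int64(4 * 1024 * 1024)
//	full := size / chunkSize // 2 full 4 MB chunks
//	tail := size % chunkSize // 2 MB + 1 byte, appended as chunk number 3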
|
||||
// GetPartEnd calculates the end position
|
||||
func GetPartEnd(begin int64, total int64, per int64) int64 { |
||||
if begin+per > total { |
||||
return total - 1 |
||||
} |
||||
return begin + per - 1 |
||||
} |
||||
|
||||
// CrcTable returns the CRC-64 table constructed from the ECMA polynomial.
var CrcTable = func() *crc64.Table {
    return crc64.MakeTable(crc64.ECMA)
}

// crc32Table returns the CRC-32 table constructed from the IEEE polynomial.
var crc32Table = func() *crc32.Table {
    return crc32.MakeTable(crc32.IEEE)
}

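// Illustrative usage sketch, not part of the upstream SDK: computing the
// CRC-64 (ECMA) checksum of a payload with the table above, the same scheme
// OSS uses for end-to-end data integrity checks.
func exampleCrc64Checksum() uint64 {
    return crc64.Checksum([]byte("hello oss"), CrcTable())
}
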
// ChoiceTransferPartOption chooses the valid options supported by UploadPart or DownloadPart.
func ChoiceTransferPartOption(options []Option) []Option {
    var outOption []Option

    listener, _ := FindOption(options, progressListener, nil)
    if listener != nil {
        outOption = append(outOption, Progress(listener.(ProgressListener)))
    }

    payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
    if payer != nil {
        outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
    }

    versionId, _ := FindOption(options, "versionId", nil)
    if versionId != nil {
        outOption = append(outOption, VersionId(versionId.(string)))
    }

    trafficLimit, _ := FindOption(options, HTTPHeaderOssTrafficLimit, nil)
    if trafficLimit != nil {
        speed, _ := strconv.ParseInt(trafficLimit.(string), 10, 64)
        outOption = append(outOption, TrafficLimitHeader(speed))
    }

    respHeader, _ := FindOption(options, responseHeader, nil)
    if respHeader != nil {
        outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
    }

    return outOption
}

// ChoiceCompletePartOption chooses the valid options supported by CompleteMultipartUpload.
func ChoiceCompletePartOption(options []Option) []Option {
    var outOption []Option

    listener, _ := FindOption(options, progressListener, nil)
    if listener != nil {
        outOption = append(outOption, Progress(listener.(ProgressListener)))
    }

    payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
    if payer != nil {
        outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
    }

    acl, _ := FindOption(options, HTTPHeaderOssObjectACL, nil)
    if acl != nil {
        outOption = append(outOption, ObjectACL(ACLType(acl.(string))))
    }

    callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
    if callback != nil {
        outOption = append(outOption, Callback(callback.(string)))
    }

    callbackVar, _ := FindOption(options, HTTPHeaderOssCallbackVar, nil)
    if callbackVar != nil {
        outOption = append(outOption, CallbackVar(callbackVar.(string)))
    }

    respHeader, _ := FindOption(options, responseHeader, nil)
    if respHeader != nil {
        outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
    }

    forbidOverWrite, _ := FindOption(options, HTTPHeaderOssForbidOverWrite, nil)
    if forbidOverWrite != nil {
        // the header value is the string "true" or "false"
        outOption = append(outOption, ForbidOverWrite(forbidOverWrite.(string) == "true"))
    }

    return outOption
}

// ChoiceAbortPartOption chooses the valid options supported by AbortMultipartUpload.
func ChoiceAbortPartOption(options []Option) []Option {
    var outOption []Option
    payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
    if payer != nil {
        outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
    }

    respHeader, _ := FindOption(options, responseHeader, nil)
    if respHeader != nil {
        outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
    }

    return outOption
}

// ChoiceHeadObjectOption chooses the valid options supported by HeadObject.
func ChoiceHeadObjectOption(options []Option) []Option {
    var outOption []Option

    // HTTPHeaderRange is deliberately not selected, so HeadObject reports the whole object length.
    payer, _ := FindOption(options, HTTPHeaderOssRequester, nil)
    if payer != nil {
        outOption = append(outOption, RequestPayer(PayerType(payer.(string))))
    }

    versionId, _ := FindOption(options, "versionId", nil)
    if versionId != nil {
        outOption = append(outOption, VersionId(versionId.(string)))
    }

    respHeader, _ := FindOption(options, responseHeader, nil)
    if respHeader != nil {
        outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header)))
    }

    return outOption
}

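// Illustrative usage sketch, not part of the upstream SDK: the Choice*
// helpers narrow one caller-supplied option list to what each request type
// accepts, so an option such as a completion callback never leaks into an
// individual UploadPart or HeadObject request.
func exampleChoiceOptions(all []Option) {
    partOpts := ChoiceTransferPartOption(all)     // progress, payer, versionId, traffic limit
    completeOpts := ChoiceCompletePartOption(all) // plus ACL, callback, forbid-overwrite
    headOpts := ChoiceHeadObjectOption(all)       // payer, versionId, response header
    _, _, _ = partOpts, completeOpts, headOpts
}
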
// CheckBucketName validates an OSS bucket name: 3-63 characters drawn from
// lowercase letters, digits, and hyphens, neither starting nor ending with a hyphen.
func CheckBucketName(bucketName string) error {
    nameLen := len(bucketName)
    if nameLen < 3 || nameLen > 63 {
        return fmt.Errorf("bucket name %s length must be between 3 and 63, now is %d", bucketName, nameLen)
    }

    for _, v := range bucketName {
        if !(('a' <= v && v <= 'z') || ('0' <= v && v <= '9') || v == '-') {
            return fmt.Errorf("bucket name %s can only include lowercase letters, numbers, and -", bucketName)
        }
    }
    if bucketName[0] == '-' || bucketName[nameLen-1] == '-' {
        return fmt.Errorf("bucket name %s must start and end with a lowercase letter or number", bucketName)
    }
    return nil
}

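// Illustrative usage sketch, not part of the upstream SDK: "my-logs-01"
// passes the validation above, while "My_Bucket" fails on both the uppercase
// letters and the underscore.
func exampleCheckBucketName() {
    fmt.Println(CheckBucketName("my-logs-01")) // <nil>
    fmt.Println(CheckBucketName("My_Bucket"))  // invalid-characters error
}
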
// GetReaderLen determines the content length of a reader without consuming
// it; unknown reader types yield an error.
func GetReaderLen(reader io.Reader) (int64, error) {
    var contentLength int64
    var err error
    switch v := reader.(type) {
    case *bytes.Buffer:
        contentLength = int64(v.Len())
    case *bytes.Reader:
        contentLength = int64(v.Len())
    case *strings.Reader:
        contentLength = int64(v.Len())
    case *os.File:
        fInfo, fError := v.Stat()
        if fError != nil {
            err = fmt.Errorf("can't get reader content length, %s", fError.Error())
        } else {
            contentLength = fInfo.Size()
        }
    case *io.LimitedReader:
        contentLength = int64(v.N)
    case *LimitedReadCloser:
        contentLength = int64(v.N)
    default:
        err = fmt.Errorf("can't get reader content length, unknown reader type")
    }
    return contentLength, err
}

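// Illustrative usage sketch, not part of the upstream SDK: GetReaderLen sizes
// in-memory readers without consuming them, which lets a client set
// Content-Length up front instead of falling back to chunked encoding.
func exampleGetReaderLen() {
    n, err := GetReaderLen(strings.NewReader("12345"))
    fmt.Println(n, err) // 5 <nil>
}
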
// LimitReadCloser wraps r in a LimitedReadCloser that reads at most n bytes.
func LimitReadCloser(r io.Reader, n int64) io.Reader {
    var lc LimitedReadCloser
    lc.R = r
    lc.N = n
    return &lc
}

// LimitedReadCloser is an io.LimitedReader that also supports Close().
type LimitedReadCloser struct {
    io.LimitedReader
}

// Close closes the underlying reader if it implements io.ReadCloser.
func (lc *LimitedReadCloser) Close() error {
    if closer, ok := lc.R.(io.ReadCloser); ok {
        return closer.Close()
    }
    return nil
}

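// Illustrative usage sketch, not part of the upstream SDK: capping a body at
// 1 KiB while still forwarding Close to the underlying ReadCloser through
// the wrapper's concrete type.
func exampleLimitReadCloser(body io.ReadCloser) error {
    r := LimitReadCloser(body, 1024).(*LimitedReadCloser)
    defer r.Close() // closes body
    buf := make([]byte, 512)
    _, err := r.Read(buf)
    return err
}
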
// DiscardReadCloser discards the first Discard bytes read from RC and passes
// the rest through unchanged.
type DiscardReadCloser struct {
    RC      io.ReadCloser
    Discard int
}

func (drc *DiscardReadCloser) Read(b []byte) (int, error) {
    n, err := drc.RC.Read(b)
    if drc.Discard == 0 || n <= 0 {
        return n, err
    }

    // everything read so far still falls inside the discard window
    if n <= drc.Discard {
        drc.Discard -= n
        return 0, err
    }

    // keep only the bytes past the discard window
    realLen := n - drc.Discard
    copy(b[0:realLen], b[drc.Discard:n])
    drc.Discard = 0
    return realLen, err
}

// Close closes the underlying ReadCloser.
func (drc *DiscardReadCloser) Close() error {
    closer, ok := drc.RC.(io.ReadCloser)
    if ok {
        return closer.Close()
    }
    return nil
}

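// Illustrative usage sketch, not part of the upstream SDK: when a ranged
// download has to restart from an aligned offset, wrapping the new body in a
// DiscardReadCloser silently drops the bytes the caller has already seen.
func exampleDiscardReadCloser(body io.ReadCloser, alreadyRead int) io.ReadCloser {
    return &DiscardReadCloser{RC: body, Discard: alreadyRead}
}
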
// ConvertEmptyValueToNil converts each listed key whose value is the empty
// string to nil.
func ConvertEmptyValueToNil(params map[string]interface{}, keys []string) {
    for _, key := range keys {
        value, ok := params[key]
        if ok && value == "" {
            // convert "" to nil
            params[key] = nil
        }
    }
}

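// Illustrative usage sketch, not part of the upstream SDK: sub-resource
// parameters such as "uploads" take no value, so their empty strings are
// converted to nil before the query string is built.
func exampleConvertEmptyValueToNil() {
    params := map[string]interface{}{"uploads": "", "max-keys": "100"}
    ConvertEmptyValueToNil(params, []string{"uploads"})
    // params["uploads"] is now nil; "max-keys" is unchanged
}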