Diffstat (limited to 'vendor/github.com/minio/minio-go/v7/api-put-object.go')
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-object.go | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
index 6ccb58156..a792cfe39 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -23,7 +23,6 @@ import (
"encoding/base64"
"errors"
"fmt"
- "hash/crc32"
"io"
"net/http"
"sort"
@@ -90,6 +89,11 @@ type PutObjectOptions struct {
DisableContentSha256 bool
DisableMultipart bool
+ // AutoChecksum is the type of checksum that will be added if no other checksum is added,
+ // like MD5 or SHA256 streaming checksum, and it is feasible for the upload type.
+ // If none is specified CRC32C is used, since it is generally the fastest.
+ AutoChecksum ChecksumType
+
// ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
// fill them serially and upload them in parallel.
// This can be used for faster uploads on non-seekable or slow-to-seek input.
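Note (not part of the upstream diff): a minimal usage sketch of the new AutoChecksum option, with a placeholder endpoint, credentials, bucket and object name. Callers can pick an explicit algorithm here; leaving the field at its zero value falls back to CRC32C as documented above.

package main

import (
	"context"
	"log"
	"strings"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Endpoint and credentials are placeholders for illustration only.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	data := strings.NewReader("hello world")
	// Ask for SHA256 checksums instead of the CRC32C default; how the checksum
	// is actually applied still depends on the upload path the client chooses.
	_, err = client.PutObject(context.Background(), "my-bucket", "my-object",
		data, data.Size(), minio.PutObjectOptions{
			AutoChecksum: minio.ChecksumSHA256,
		})
	if err != nil {
		log.Fatal(err)
	}
}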
@@ -300,6 +304,7 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
if size > int64(maxMultipartPutObjectSize) {
return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}
+ opts.AutoChecksum.SetDefault(ChecksumCRC32C)
// NOTE: Streaming signature is not supported by GCS.
if s3utils.IsGoogleEndpoint(*c.endpointURL) {
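Note (not part of the upstream diff): the SetDefault call above is what keeps the new field backward compatible, since it only fills in CRC32C when the caller did not choose an algorithm. A standalone sketch of that idea, using simplified stand-in types rather than minio-go's real ChecksumType:

package main

import "fmt"

// checksumType is a simplified stand-in for minio-go's ChecksumType.
type checksumType uint32

const (
	checksumNone   checksumType = 0
	checksumCRC32C checksumType = 1
)

// setDefault mirrors the behavior relied on in putObjectCommon: assign the
// fallback only when no algorithm has been chosen, so caller settings win.
func (c *checksumType) setDefault(t checksumType) {
	if *c == checksumNone {
		*c = t
	}
}

func main() {
	var auto checksumType // zero value: the caller did not pick anything
	auto.setDefault(checksumCRC32C)
	fmt.Println(auto == checksumCRC32C) // true: default applied

	explicit := checksumType(2) // pretend the caller picked another algorithm
	explicit.setDefault(checksumCRC32C)
	fmt.Println(explicit == checksumCRC32C) // false: explicit choice kept
}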
@@ -361,7 +366,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
- opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
}
// Initiate a new multipart upload.
@@ -390,7 +395,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
customHeader := make(http.Header)
- crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+ crc := opts.AutoChecksum.Hasher()
for partNumber <= totalPartsCount {
length, rerr := readFull(reader, buf)
@@ -413,7 +418,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
- customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+ customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
@@ -466,12 +471,13 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
+ AutoChecksum: opts.AutoChecksum,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
- opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
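Note (not part of the upstream diff): the three accessors used above map one checksum type to the three spellings the code previously hard-coded, with String() providing the X-Amz-Checksum-Algorithm value, Key() the lowercase per-part header, and KeyCapitalized() the metadata key set on completion. A small sketch that prints them for two algorithms, assuming a minio-go version that includes this change:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	for _, ct := range []minio.ChecksumType{
		minio.ChecksumCRC32C,
		minio.ChecksumSHA256,
	} {
		// String() -> algorithm name, Key() -> lowercase part header,
		// KeyCapitalized() -> capitalized metadata key used on completion.
		fmt.Println(ct.String(), ct.Key(), ct.KeyCapitalized())
	}
}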