Diffstat (limited to 'vendor/github.com/minio')
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-bucket-cors.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-bucket-policy.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-error-response.go | 37
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-get-object.go | 8
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-list.go | 49
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-prompt-object.go | 4
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-bucket.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go | 17
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-remove.go | 187
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go | 8
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-stat.go | 12
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api.go | 63
-rw-r--r--  vendor/github.com/minio/minio-go/v7/bucket-cache.go | 8
-rw-r--r--  vendor/github.com/minio/minio-go/v7/functional_tests.go | 308
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go | 45
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go | 73
-rw-r--r--  vendor/github.com/minio/minio-go/v7/post-policy.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/v7/retry.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/v7/s3-error.go | 130
-rw-r--r--  vendor/github.com/minio/minio-go/v7/utils.go | 22
21 files changed, 823 insertions, 160 deletions
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go
index 8bf537f73..9d514947d 100644
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go
@@ -98,7 +98,7 @@ func (c *Client) GetBucketCors(ctx context.Context, bucketName string) (*cors.Co
bucketCors, err := c.getBucketCors(ctx, bucketName)
if err != nil {
errResponse := ToErrorResponse(err)
- if errResponse.Code == "NoSuchCORSConfiguration" {
+ if errResponse.Code == NoSuchCORSConfiguration {
return nil, nil
}
return nil, err
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
index dbb5259a8..3a168c13e 100644
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
@@ -104,7 +104,7 @@ func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string
bucketPolicy, err := c.getBucketPolicy(ctx, bucketName)
if err != nil {
errResponse := ToErrorResponse(err)
- if errResponse.Code == "NoSuchBucketPolicy" {
+ if errResponse.Code == NoSuchBucketPolicy {
return "", nil
}
return "", err
diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go
index 7df211fda..e85aa322c 100644
--- a/vendor/github.com/minio/minio-go/v7/api-error-response.go
+++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go
@@ -136,15 +136,15 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
if objectName == "" {
errResp = ErrorResponse{
StatusCode: resp.StatusCode,
- Code: "NoSuchBucket",
- Message: "The specified bucket does not exist.",
+ Code: NoSuchBucket,
+ Message: s3ErrorResponseMap[NoSuchBucket],
BucketName: bucketName,
}
} else {
errResp = ErrorResponse{
StatusCode: resp.StatusCode,
- Code: "NoSuchKey",
- Message: "The specified key does not exist.",
+ Code: NoSuchKey,
+ Message: s3ErrorResponseMap[NoSuchKey],
BucketName: bucketName,
Key: objectName,
}
@@ -152,23 +152,23 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
case http.StatusForbidden:
errResp = ErrorResponse{
StatusCode: resp.StatusCode,
- Code: "AccessDenied",
- Message: "Access Denied.",
+ Code: AccessDenied,
+ Message: s3ErrorResponseMap[AccessDenied],
BucketName: bucketName,
Key: objectName,
}
case http.StatusConflict:
errResp = ErrorResponse{
StatusCode: resp.StatusCode,
- Code: "Conflict",
- Message: "Bucket not empty.",
+ Code: Conflict,
+ Message: s3ErrorResponseMap[Conflict],
BucketName: bucketName,
}
case http.StatusPreconditionFailed:
errResp = ErrorResponse{
StatusCode: resp.StatusCode,
- Code: "PreconditionFailed",
- Message: s3ErrorResponseMap["PreconditionFailed"],
+ Code: PreconditionFailed,
+ Message: s3ErrorResponseMap[PreconditionFailed],
BucketName: bucketName,
Key: objectName,
}
@@ -209,7 +209,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
if errResp.Region == "" {
errResp.Region = resp.Header.Get("x-amz-bucket-region")
}
- if errResp.Code == "InvalidRegion" && errResp.Region != "" {
+ if errResp.Code == InvalidRegion && errResp.Region != "" {
errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region)
}
@@ -218,10 +218,11 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
func errTransferAccelerationBucket(bucketName string) error {
+ msg := "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’."
return ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "InvalidArgument",
- Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.",
+ Code: InvalidArgument,
+ Message: msg,
BucketName: bucketName,
}
}
@@ -231,7 +232,7 @@ func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName st
msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
return ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "EntityTooLarge",
+ Code: EntityTooLarge,
Message: msg,
BucketName: bucketName,
Key: objectName,
@@ -243,7 +244,7 @@ func errEntityTooSmall(totalSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize)
return ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "EntityTooSmall",
+ Code: EntityTooSmall,
Message: msg,
BucketName: bucketName,
Key: objectName,
@@ -255,7 +256,7 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize)
return ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "UnexpectedEOF",
+ Code: UnexpectedEOF,
Message: msg,
BucketName: bucketName,
Key: objectName,
@@ -266,7 +267,7 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
func errInvalidArgument(message string) error {
return ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "InvalidArgument",
+ Code: InvalidArgument,
Message: message,
RequestID: "minio",
}
@@ -277,7 +278,7 @@ func errInvalidArgument(message string) error {
func errAPINotSupported(message string) error {
return ErrorResponse{
StatusCode: http.StatusNotImplemented,
- Code: "APINotSupported",
+ Code: APINotSupported,
Message: message,
RequestID: "minio",
}
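Note: with the error codes now exported as constants (see s3-error.go further below), callers can compare codes without string literals. A minimal sketch, assuming err came from any minio-go call; the package and helper names are illustrative only:

package example

import "github.com/minio/minio-go/v7"

// bucketMissing reports whether err carries the S3 "NoSuchBucket" code,
// comparing against the exported constant rather than a raw string.
func bucketMissing(err error) bool {
	return minio.ToErrorResponse(err).Code == minio.NoSuchBucket
}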
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go
index 5cc85f61c..d3cb6c22a 100644
--- a/vendor/github.com/minio/minio-go/v7/api-get-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go
@@ -34,14 +34,14 @@ func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, o
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "InvalidBucketName",
+ Code: InvalidBucketName,
Message: err.Error(),
}
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "XMinioInvalidObjectName",
+ Code: XMinioInvalidObjectName,
Message: err.Error(),
}
}
@@ -659,14 +659,14 @@ func (c *Client) getObject(ctx context.Context, bucketName, objectName string, o
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, ObjectInfo{}, nil, ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "InvalidBucketName",
+ Code: InvalidBucketName,
Message: err.Error(),
}
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, ObjectInfo{}, nil, ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "XMinioInvalidObjectName",
+ Code: XMinioInvalidObjectName,
Message: err.Error(),
}
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go
index 1af0fadbf..5bf67a666 100644
--- a/vendor/github.com/minio/minio-go/v7/api-list.go
+++ b/vendor/github.com/minio/minio-go/v7/api-list.go
@@ -285,7 +285,7 @@ func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefi
// sure proper responses are received.
if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
return listBucketResult, ErrorResponse{
- Code: "NotImplemented",
+ Code: NotImplemented,
Message: "Truncated response should have continuation token set",
}
}
@@ -419,19 +419,25 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
}
for _, version := range vers {
info := ObjectInfo{
- ETag: trimEtag(version.ETag),
- Key: version.Key,
- LastModified: version.LastModified.Truncate(time.Millisecond),
- Size: version.Size,
- Owner: version.Owner,
- StorageClass: version.StorageClass,
- IsLatest: version.IsLatest,
- VersionID: version.VersionID,
- IsDeleteMarker: version.isDeleteMarker,
- UserTags: version.UserTags,
- UserMetadata: version.UserMetadata,
- Internal: version.Internal,
- NumVersions: numVersions,
+ ETag: trimEtag(version.ETag),
+ Key: version.Key,
+ LastModified: version.LastModified.Truncate(time.Millisecond),
+ Size: version.Size,
+ Owner: version.Owner,
+ StorageClass: version.StorageClass,
+ IsLatest: version.IsLatest,
+ VersionID: version.VersionID,
+ IsDeleteMarker: version.isDeleteMarker,
+ UserTags: version.UserTags,
+ UserMetadata: version.UserMetadata,
+ Internal: version.Internal,
+ NumVersions: numVersions,
+ ChecksumMode: version.ChecksumType,
+ ChecksumCRC32: version.ChecksumCRC32,
+ ChecksumCRC32C: version.ChecksumCRC32C,
+ ChecksumSHA1: version.ChecksumSHA1,
+ ChecksumSHA256: version.ChecksumSHA256,
+ ChecksumCRC64NVME: version.ChecksumCRC64NVME,
}
if !yield(info) {
return false
@@ -753,13 +759,9 @@ func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListOb
objectStatCh := make(chan ObjectInfo, 1)
go func() {
defer close(objectStatCh)
- send := func(obj ObjectInfo) bool {
- select {
- case <-ctx.Done():
- return false
- case objectStatCh <- obj:
- return true
- }
+ if contextCanceled(ctx) {
+ objectStatCh <- ObjectInfo{Err: ctx.Err()}
+ return
}
var objIter iter.Seq[ObjectInfo]
@@ -777,8 +779,11 @@ func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListOb
}
}
for obj := range objIter {
- if !send(obj) {
+ select {
+ case <-ctx.Done():
+ objectStatCh <- ObjectInfo{Err: ctx.Err()}
return
+ case objectStatCh <- obj:
}
}
}()
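Note: after this change a canceled context is surfaced on the ListObjects channel as an ObjectInfo whose Err field is set, instead of the listing stopping silently. A minimal consumer sketch; the client value, bucket name, and function name are illustrative:

package example

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
)

// listAll drains a listing and stops on the first error, which now
// includes ctx.Err() when the context is canceled mid-listing.
func listAll(ctx context.Context, client *minio.Client, bucket string) {
	for obj := range client.ListObjects(ctx, bucket, minio.ListObjectsOptions{Recursive: true}) {
		if obj.Err != nil {
			log.Println("listing stopped:", obj.Err)
			return
		}
		fmt.Println(obj.Key, obj.Size)
	}
}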
diff --git a/vendor/github.com/minio/minio-go/v7/api-prompt-object.go b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go
index bf6239d2d..26c41d34a 100644
--- a/vendor/github.com/minio/minio-go/v7/api-prompt-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go
@@ -35,14 +35,14 @@ func (c *Client) PromptObject(ctx context.Context, bucketName, objectName, promp
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "InvalidBucketName",
+ Code: InvalidBucketName,
Message: err.Error(),
}
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "XMinioInvalidObjectName",
+ Code: XMinioInvalidObjectName,
Message: err.Error(),
}
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go
index 447d0c796..47d8419e6 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go
@@ -35,7 +35,7 @@ func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBuc
err = c.doMakeBucket(ctx, bucketName, opts)
if err != nil && (opts.Region == "" || opts.Region == "us-east-1") {
- if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" {
+ if resp, ok := err.(ErrorResponse); ok && resp.Code == AuthorizationHeaderMalformed && resp.Region != "" {
opts.Region = resp.Region
err = c.doMakeBucket(ctx, bucketName, opts)
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
index 84bc19b28..844172324 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
@@ -44,7 +44,7 @@ func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
- if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ if errResp.Code == AccessDenied && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
@@ -392,13 +392,14 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
// Instantiate all the complete multipart buffer.
completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- contentBody: completeMultipartUploadBuffer,
- contentLength: int64(len(completeMultipartUploadBytes)),
- contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
- customHeader: headers,
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: completeMultipartUploadBuffer,
+ contentLength: int64(len(completeMultipartUploadBytes)),
+ contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
+ customHeader: headers,
+ expect200OKWithError: true,
}
// Execute POST to complete multipart upload for an objectName.
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
index 987a3c692..4a7243edc 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
@@ -56,7 +56,7 @@ func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objec
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
- if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ if errResp.Code == AccessDenied && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go
index 5b4443ec5..2a38e014a 100644
--- a/vendor/github.com/minio/minio-go/v7/api-remove.go
+++ b/vendor/github.com/minio/minio-go/v7/api-remove.go
@@ -22,6 +22,7 @@ import (
"context"
"encoding/xml"
"io"
+ "iter"
"net/http"
"net/url"
"time"
@@ -271,7 +272,7 @@ func processRemoveMultiObjectsResponse(body io.Reader, resultCh chan<- RemoveObj
for _, obj := range rmResult.UnDeletedObjects {
// Version does not exist is not an error ignore and continue.
switch obj.Code {
- case "InvalidArgument", "NoSuchVersion":
+ case InvalidArgument, NoSuchVersion:
continue
}
resultCh <- RemoveObjectResult{
@@ -333,6 +334,33 @@ func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh
return errorCh
}
+// RemoveObjectsWithIter bulk deletes multiple objects from a bucket.
+// Objects (with optional versions) to be removed must be provided with
+// an iterator. Objects are removed asynchronously and results must be
+// consumed. If the returned result iterator is stopped, the context is
+// canceled, or a remote call fails, the provided iterator will no
+// longer be consumed.
+func (c *Client) RemoveObjectsWithIter(ctx context.Context, bucketName string, objectsIter iter.Seq[ObjectInfo], opts RemoveObjectsOptions) (iter.Seq[RemoveObjectResult], error) {
+ // Validate if bucket name is valid.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+ // Validate that the objects iterator is properly allocated.
+ if objectsIter == nil {
+ return nil, errInvalidArgument("Objects iterator can never be nil")
+ }
+
+ return func(yield func(RemoveObjectResult) bool) {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+
+ c.removeObjectsIter(ctx, bucketName, objectsIter, yield, opts)
+ }, nil
+}
+
// RemoveObjectsWithResult removes multiple objects from a bucket while
// it is possible to specify objects versions which are received from
// objectsCh. Remove results, successes and failures are sent back via
@@ -381,6 +409,144 @@ func hasInvalidXMLChar(str string) bool {
return false
}
+// Generate and call MultiDelete S3 requests based on entries received from the iterator.
+func (c *Client) removeObjectsIter(ctx context.Context, bucketName string, objectsIter iter.Seq[ObjectInfo], yield func(RemoveObjectResult) bool, opts RemoveObjectsOptions) {
+ maxEntries := 1000
+ urlValues := make(url.Values)
+ urlValues.Set("delete", "")
+
+ // Build headers.
+ headers := make(http.Header)
+ if opts.GovernanceBypass {
+ // Set the bypass governance retention header
+ headers.Set(amzBypassGovernance, "true")
+ }
+
+ processRemoveMultiObjectsResponseIter := func(batch []ObjectInfo, yield func(RemoveObjectResult) bool) bool {
+ if len(batch) == 0 {
+ return false
+ }
+
+ // Generate remove multi objects XML request
+ removeBytes := generateRemoveMultiObjectsRequest(batch)
+ // Execute POST on bucket to remove objects.
+ resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(removeBytes),
+ contentLength: int64(len(removeBytes)),
+ contentMD5Base64: sumMD5Base64(removeBytes),
+ contentSHA256Hex: sum256Hex(removeBytes),
+ customHeader: headers,
+ })
+ if resp != nil {
+ defer closeResponse(resp)
+ if resp.StatusCode != http.StatusOK {
+ err = httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ if err != nil {
+ for _, b := range batch {
+ if !yield(RemoveObjectResult{
+ ObjectName: b.Key,
+ ObjectVersionID: b.VersionID,
+ Err: err,
+ }) {
+ return false
+ }
+ }
+ return false
+ }
+
+ // Parse multi delete XML response
+ rmResult := &deleteMultiObjectsResult{}
+ if err := xmlDecoder(resp.Body, rmResult); err != nil {
+ yield(RemoveObjectResult{ObjectName: "", Err: err})
+ return false
+ }
+
+ // Fill deletion that returned an error.
+ for _, obj := range rmResult.UnDeletedObjects {
+ // Version does not exist is not an error ignore and continue.
+ switch obj.Code {
+ case "InvalidArgument", "NoSuchVersion":
+ continue
+ }
+ if !yield(RemoveObjectResult{
+ ObjectName: obj.Key,
+ ObjectVersionID: obj.VersionID,
+ Err: ErrorResponse{
+ Code: obj.Code,
+ Message: obj.Message,
+ },
+ }) {
+ return false
+ }
+ }
+
+ // Fill deletion that returned success
+ for _, obj := range rmResult.DeletedObjects {
+ if !yield(RemoveObjectResult{
+ ObjectName: obj.Key,
+ // Only filled with versioned buckets
+ ObjectVersionID: obj.VersionID,
+ DeleteMarker: obj.DeleteMarker,
+ DeleteMarkerVersionID: obj.DeleteMarkerVersionID,
+ }) {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ var batch []ObjectInfo
+
+ next, stop := iter.Pull(objectsIter)
+ defer stop()
+
+ for {
+ // Loop over entries by 1000 and call MultiDelete requests
+ object, ok := next()
+ if !ok {
+ // delete the remaining batch.
+ processRemoveMultiObjectsResponseIter(batch, yield)
+ return
+ }
+
+ if hasInvalidXMLChar(object.Key) {
+ // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document.
+ removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
+ VersionID: object.VersionID,
+ GovernanceBypass: opts.GovernanceBypass,
+ })
+ if err := removeResult.Err; err != nil {
+ // Version does not exist is not an error ignore and continue.
+ switch ToErrorResponse(err).Code {
+ case "InvalidArgument", "NoSuchVersion":
+ continue
+ }
+ }
+ if !yield(removeResult) {
+ return
+ }
+
+ continue
+ }
+
+ batch = append(batch, object)
+ if len(batch) < maxEntries {
+ continue
+ }
+
+ if !processRemoveMultiObjectsResponseIter(batch, yield) {
+ return
+ }
+
+ batch = batch[:0]
+ }
+}
+
// Generate and call MultiDelete S3 requests based on entries received from objectsCh
func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) {
maxEntries := 1000
@@ -407,7 +573,7 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh
if err := removeResult.Err; err != nil {
// Version does not exist is not an error ignore and continue.
switch ToErrorResponse(err).Code {
- case "InvalidArgument", "NoSuchVersion":
+ case InvalidArgument, NoSuchVersion:
continue
}
resultCh <- removeResult
@@ -442,13 +608,14 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh
removeBytes := generateRemoveMultiObjectsRequest(batch)
// Execute POST on bucket to remove objects.
resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentBody: bytes.NewReader(removeBytes),
- contentLength: int64(len(removeBytes)),
- contentMD5Base64: sumMD5Base64(removeBytes),
- contentSHA256Hex: sum256Hex(removeBytes),
- customHeader: headers,
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(removeBytes),
+ contentLength: int64(len(removeBytes)),
+ contentMD5Base64: sumMD5Base64(removeBytes),
+ contentSHA256Hex: sum256Hex(removeBytes),
+ customHeader: headers,
+ expect200OKWithError: true,
})
if resp != nil {
if resp.StatusCode != http.StatusOK {
@@ -535,7 +702,7 @@ func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectNam
// This is needed specifically for abort and it cannot
// be converged into default case.
errorResponse = ErrorResponse{
- Code: "NoSuchUpload",
+ Code: NoSuchUpload,
Message: "The specified multipart upload does not exist.",
BucketName: bucketName,
Key: objectName,
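Note: a minimal sketch of calling the new RemoveObjectsWithIter API from application code, modeled on the functional test added further below; the bucket name, keys, and function name are placeholders:

package example

import (
	"context"
	"iter"
	"log"

	"github.com/minio/minio-go/v7"
)

// removeKeys feeds keys lazily; the client batches them into MultiDelete
// requests of up to 1000 entries and yields one result per object.
func removeKeys(ctx context.Context, client *minio.Client, bucket string, keys []string) error {
	var objects iter.Seq[minio.ObjectInfo] = func(yield func(minio.ObjectInfo) bool) {
		for _, key := range keys {
			if !yield(minio.ObjectInfo{Key: key}) {
				return
			}
		}
	}

	results, err := client.RemoveObjectsWithIter(ctx, bucket, objects, minio.RemoveObjectsOptions{})
	if err != nil {
		return err
	}
	for res := range results {
		if res.Err != nil {
			log.Printf("delete %s failed: %v", res.ObjectName, res.Err)
		}
	}
	return nil
}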
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
index aaa3158b9..32d589716 100644
--- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -107,6 +107,14 @@ type Version struct {
M int // Parity blocks
} `xml:"Internal"`
+ // Checksum values. Only returned by AiStor servers.
+ ChecksumCRC32 string `xml:",omitempty"`
+ ChecksumCRC32C string `xml:",omitempty"`
+ ChecksumSHA1 string `xml:",omitempty"`
+ ChecksumSHA256 string `xml:",omitempty"`
+ ChecksumCRC64NVME string `xml:",omitempty"`
+ ChecksumType string `xml:",omitempty"`
+
isDeleteMarker bool
}
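Note: these checksum fields flow through listObjectVersions into ObjectInfo (see api-list.go above). A minimal sketch of reading them from a versioned listing; they stay empty unless the server returns them, and the names below are illustrative:

package example

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
)

// printVersionChecksums lists object versions and prints any checksum
// values the server reported for each version.
func printVersionChecksums(ctx context.Context, client *minio.Client, bucket string) {
	opts := minio.ListObjectsOptions{WithVersions: true, Recursive: true}
	for obj := range client.ListObjects(ctx, bucket, opts) {
		if obj.Err != nil {
			fmt.Println("error:", obj.Err)
			return
		}
		fmt.Println(obj.Key, obj.VersionID, obj.ChecksumMode, obj.ChecksumCRC32C, obj.ChecksumCRC64NVME)
	}
}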
diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go
index 11455beb3..a4b2af7ae 100644
--- a/vendor/github.com/minio/minio-go/v7/api-stat.go
+++ b/vendor/github.com/minio/minio-go/v7/api-stat.go
@@ -39,14 +39,14 @@ func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, err
})
defer closeResponse(resp)
if err != nil {
- if ToErrorResponse(err).Code == "NoSuchBucket" {
+ if ToErrorResponse(err).Code == NoSuchBucket {
return false, nil
}
return false, err
}
if resp != nil {
resperr := httpRespToErrorResponse(resp, bucketName, "")
- if ToErrorResponse(resperr).Code == "NoSuchBucket" {
+ if ToErrorResponse(resperr).Code == NoSuchBucket {
return false, nil
}
if resp.StatusCode != http.StatusOK {
@@ -63,14 +63,14 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string,
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "InvalidBucketName",
+ Code: InvalidBucketName,
Message: err.Error(),
}
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, ErrorResponse{
StatusCode: http.StatusBadRequest,
- Code: "XMinioInvalidObjectName",
+ Code: XMinioInvalidObjectName,
Message: err.Error(),
}
}
@@ -102,8 +102,8 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string,
if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker {
errResp := ErrorResponse{
StatusCode: resp.StatusCode,
- Code: "MethodNotAllowed",
- Message: "The specified method is not allowed against this resource.",
+ Code: MethodNotAllowed,
+ Message: s3ErrorResponseMap[MethodNotAllowed],
BucketName: bucketName,
Key: objectName,
}
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index cc00f92a3..10a12ccfa 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -21,6 +21,7 @@ import (
"bytes"
"context"
"encoding/base64"
+ "encoding/xml"
"errors"
"fmt"
"io"
@@ -38,6 +39,7 @@ import (
"sync/atomic"
"time"
+ "github.com/dustin/go-humanize"
md5simd "github.com/minio/md5-simd"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/kvcache"
@@ -45,6 +47,8 @@ import (
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/minio/minio-go/v7/pkg/singleflight"
"golang.org/x/net/publicsuffix"
+
+ internalutils "github.com/minio/minio-go/v7/pkg/utils"
)
// Client implements Amazon S3 compatible methods.
@@ -159,7 +163,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "v7.0.92"
+ libraryVersion = "v7.0.94"
)
// User Agent should always following the below style.
@@ -455,7 +459,7 @@ func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, erro
gcancel()
if !IsNetworkOrHostDown(err, false) {
switch ToErrorResponse(err).Code {
- case "NoSuchBucket", "AccessDenied", "":
+ case NoSuchBucket, AccessDenied, "":
atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
}
}
@@ -477,7 +481,7 @@ func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, erro
gcancel()
if !IsNetworkOrHostDown(err, false) {
switch ToErrorResponse(err).Code {
- case "NoSuchBucket", "AccessDenied", "":
+ case NoSuchBucket, AccessDenied, "":
atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
}
}
@@ -512,6 +516,8 @@ type requestMetadata struct {
streamSha256 bool
addCrc *ChecksumType
trailer http.Header // (http.Request).Trailer. Requires v4 signature.
+
+ expect200OKWithError bool
}
// dumpHTTP - dump HTTP request and response.
@@ -615,6 +621,28 @@ func (c *Client) do(req *http.Request) (resp *http.Response, err error) {
return resp, nil
}
+// Peek resp.Body looking for an S3 XML error response:
+// - Return the error XML bytes if an error is found
+// - Make sure to always re-establish the whole HTTP response stream before returning
+func tryParseErrRespFromBody(resp *http.Response) ([]byte, error) {
+ peeker := internalutils.NewPeekReadCloser(resp.Body, 5*humanize.MiByte)
+ defer func() {
+ peeker.ReplayFromStart()
+ resp.Body = peeker
+ }()
+
+ errResp := ErrorResponse{}
+ errBytes, err := xmlDecodeAndBody(peeker, &errResp)
+ if err != nil {
+ var unmarshalErr xml.UnmarshalError
+ if errors.As(err, &unmarshalErr) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return errBytes, nil
+}
+
// List of success status.
var successStatus = []int{
http.StatusOK,
@@ -702,16 +730,30 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
return nil, err
}
- // For any known successful http status, return quickly.
+ var success bool
+ var errBodyBytes []byte
+
for _, httpStatus := range successStatus {
if httpStatus == res.StatusCode {
+ success = true
+ break
+ }
+ }
+
+ if success {
+ if !metadata.expect200OKWithError {
+ return res, nil
+ }
+ errBodyBytes, err = tryParseErrRespFromBody(res)
+ if err == nil && len(errBodyBytes) == 0 {
+ // No S3 XML error is found
return res, nil
}
+ } else {
+ errBodyBytes, err = io.ReadAll(res.Body)
}
- // Read the body to be saved later.
- errBodyBytes, err := io.ReadAll(res.Body)
- // res.Body should be closed
+ // By now, res.Body should be closed
closeResponse(res)
if err != nil {
return nil, err
@@ -723,6 +765,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
// For errors verify if its retryable otherwise fail quickly.
errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
+ err = errResponse
// Save the body back again.
errBodySeeker.Seek(0, 0) // Seek back to starting point.
@@ -736,11 +779,11 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
// region is empty.
if c.region == "" {
switch errResponse.Code {
- case "AuthorizationHeaderMalformed":
+ case AuthorizationHeaderMalformed:
fallthrough
- case "InvalidRegion":
+ case InvalidRegion:
fallthrough
- case "AccessDenied":
+ case AccessDenied:
if errResponse.Region == "" {
// Region is empty we simply return the error.
return res, err
diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go
index e43b889b9..b41902f65 100644
--- a/vendor/github.com/minio/minio-go/v7/bucket-cache.go
+++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go
@@ -84,18 +84,18 @@ func processBucketLocationResponse(resp *http.Response, bucketName string) (buck
// request. Move forward and let the top level callers
// succeed if possible based on their policy.
switch errResp.Code {
- case "NotImplemented":
+ case NotImplemented:
switch errResp.Server {
case "AmazonSnowball":
return "snowball", nil
case "cloudflare":
return "us-east-1", nil
}
- case "AuthorizationHeaderMalformed":
+ case AuthorizationHeaderMalformed:
fallthrough
- case "InvalidRegion":
+ case InvalidRegion:
fallthrough
- case "AccessDenied":
+ case AccessDenied:
if errResp.Region == "" {
return "us-east-1", nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
index 33e87e6e1..97c6930fb 100644
--- a/vendor/github.com/minio/minio-go/v7/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -31,6 +31,7 @@ import (
"hash"
"hash/crc32"
"io"
+ "iter"
"log/slog"
"math/rand"
"mime/multipart"
@@ -259,7 +260,7 @@ func cleanupVersionedBucket(bucketName string, c *minio.Client) error {
}
func isErrNotImplemented(err error) bool {
- return minio.ToErrorResponse(err).Code == "NotImplemented"
+ return minio.ToErrorResponse(err).Code == minio.NotImplemented
}
func isRunOnFail() bool {
@@ -465,8 +466,8 @@ func testMakeBucketError() {
return
}
// Verify valid error response from server.
- if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
- minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
+ if minio.ToErrorResponse(err).Code != minio.BucketAlreadyExists &&
+ minio.ToErrorResponse(err).Code != minio.BucketAlreadyOwnedByYou {
logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
return
}
@@ -1073,7 +1074,7 @@ func testPutObjectWithVersioning() {
var results []minio.ObjectInfo
for info := range objectsInfo {
if info.Err != nil {
- logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
return
}
results = append(results, info)
@@ -3204,7 +3205,7 @@ func testGetObjectAttributesErrorCases() {
}
errorResponse := err.(minio.ErrorResponse)
- if errorResponse.Code != "NoSuchBucket" {
+ if errorResponse.Code != minio.NoSuchBucket {
logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchBucket but got "+errorResponse.Code, nil)
return
}
@@ -3247,8 +3248,8 @@ func testGetObjectAttributesErrorCases() {
}
errorResponse = err.(minio.ErrorResponse)
- if errorResponse.Code != "NoSuchKey" {
- logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchKey but got "+errorResponse.Code, nil)
+ if errorResponse.Code != minio.NoSuchKey {
+ logError(testName, function, args, startTime, "", "Invalid error code, expected "+minio.NoSuchKey+" but got "+errorResponse.Code, nil)
return
}
@@ -3272,8 +3273,8 @@ func testGetObjectAttributesErrorCases() {
return
}
errorResponse = err.(minio.ErrorResponse)
- if errorResponse.Code != "NoSuchVersion" {
- logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchVersion but got "+errorResponse.Code, nil)
+ if errorResponse.Code != minio.NoSuchVersion {
+ logError(testName, function, args, startTime, "", "Invalid error code, expected "+minio.NoSuchVersion+" but got "+errorResponse.Code, nil)
return
}
@@ -3928,10 +3929,10 @@ func testRemoveMultipleObjects() {
defer cleanupBucket(bucketName, c)
- r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 1))
// Multi remove of 1100 objects
- nrObjects := 200
+ nrObjects := 1100
objectsCh := make(chan minio.ObjectInfo)
@@ -3940,7 +3941,7 @@ func testRemoveMultipleObjects() {
// Upload objects and send them to objectsCh
for i := 0; i < nrObjects; i++ {
objectName := "sample" + strconv.Itoa(i) + ".txt"
- info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
+ info, err := c.PutObject(context.Background(), bucketName, objectName, r, 1,
minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
@@ -3968,6 +3969,78 @@ func testRemoveMultipleObjects() {
logSuccess(testName, function, args, startTime)
}
+// Test removing multiple objects with Remove API as iterator
+func testRemoveMultipleObjectsIter() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "RemoveObjects(bucketName, objectsCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+
+ c, err := NewClient(ClientConfig{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ buf := []byte("a")
+
+ // Multi remove of 1100 objects
+ nrObjects := 1100
+
+ objectsIter := func() iter.Seq[minio.ObjectInfo] {
+ return func(yield func(minio.ObjectInfo) bool) {
+ // Upload objects and send them to objectsCh
+ for i := 0; i < nrObjects; i++ {
+ objectName := "sample" + strconv.Itoa(i) + ".txt"
+ info, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), 1,
+ minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ continue
+ }
+ if !yield(minio.ObjectInfo{
+ Key: info.Key,
+ VersionID: info.VersionID,
+ }) {
+ return
+ }
+ }
+ }
+ }
+
+ // Call RemoveObjects API
+ results, err := c.RemoveObjectsWithIter(context.Background(), bucketName, objectsIter(), minio.RemoveObjectsOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error", err)
+ return
+ }
+
+ for result := range results {
+ if result.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error", result.Err)
+ return
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
// Test removing multiple objects and check for results
func testRemoveMultipleObjectsWithResult() {
// initialize logging params
@@ -3997,7 +4070,7 @@ func testRemoveMultipleObjectsWithResult() {
defer cleanupVersionedBucket(bucketName, c)
- r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+ buf := []byte("a")
nrObjects := 10
nrLockedObjects := 5
@@ -4009,7 +4082,7 @@ func testRemoveMultipleObjectsWithResult() {
// Upload objects and send them to objectsCh
for i := 0; i < nrObjects; i++ {
objectName := "sample" + strconv.Itoa(i) + ".txt"
- info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
+ info, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), 1,
minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
@@ -7589,7 +7662,7 @@ func testGetObjectModified() {
// Confirm that a Stat() call in between doesn't change the Object's cached etag.
_, err = reader.Stat()
- expectedError := "At least one of the pre-conditions you specified did not hold"
+ expectedError := "At least one of the pre-conditions you specified did not hold."
if err.Error() != expectedError {
logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err)
return
@@ -7751,8 +7824,8 @@ func testMakeBucketErrorV2() {
return
}
// Verify valid error response from server.
- if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
- minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
+ if minio.ToErrorResponse(err).Code != minio.BucketAlreadyExists &&
+ minio.ToErrorResponse(err).Code != minio.BucketAlreadyOwnedByYou {
logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
return
}
@@ -11415,6 +11488,87 @@ func testPutObject0ByteV2() {
logSuccess(testName, function, args, startTime)
}
+// Test put object of 0 bytes with non-US-ASCII metadata.
+func testPutObjectMetadataNonUSASCIIV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "size": 0,
+ "opts": "",
+ }
+ metadata := map[string]string{
+ "test-zh": "你好",
+ "test-ja": "こんにちは",
+ "test-ko": "안녕하세요",
+ "test-ru": "Здравствуй",
+ "test-de": "Hallo",
+ "test-it": "Ciao",
+ "test-pt": "Olá",
+ "test-ar": "مرحبا",
+ "test-hi": "नमस्ते",
+ "test-hu": "Helló",
+ "test-ro": "Bună",
+ "test-be": "Прывiтанне",
+ "test-sl": "Pozdravljen",
+ "test-sr": "Здраво",
+ "test-bg": "Здравейте",
+ "test-uk": "Привіт",
+ }
+ c, err := NewClient(ClientConfig{CredsV2: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ objectName := bucketName + "unique"
+ args["objectName"] = objectName
+ args["opts"] = minio.PutObjectOptions{}
+
+ // Upload an object.
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{
+ UserMetadata: metadata,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err)
+ return
+ }
+ if st.Size != 0 {
+ logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+strconv.FormatInt(st.Size, 10), err)
+ return
+ }
+
+ for k, v := range metadata {
+ if st.Metadata.Get(http.CanonicalHeaderKey("X-Amz-Meta-"+k)) != v {
+ logError(testName, function, args, startTime, "", "Expected upload object metadata "+k+": "+v+" but got "+st.Metadata.Get("X-Amz-Meta-"+k), err)
+ return
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
// Test expected error cases
func testComposeObjectErrorCases() {
// initialize logging params
@@ -13557,6 +13711,115 @@ func testRemoveObjects() {
logSuccess(testName, function, args, startTime)
}
+// Test deleting multiple objects with object retention set in Governance mode, via iterators
+func testRemoveObjectsIter() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "RemoveObjects(bucketName, objectsCh, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectPrefix": "",
+ "recursive": "true",
+ }
+
+ c, err := NewClient(ClientConfig{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datafile-129-MB"]
+ reader := getDataReader("datafile-129-MB")
+ defer reader.Close()
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Error uploading object", err)
+ return
+ }
+
+ // Replace with smaller...
+ bufSize = dataFileMap["datafile-10-kB"]
+ reader = getDataReader("datafile-10-kB")
+ defer reader.Close()
+
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Error uploading object", err)
+ }
+
+ t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
+ m := minio.RetentionMode(minio.Governance)
+ opts := minio.PutObjectRetentionOptions{
+ GovernanceBypass: false,
+ RetainUntilDate: &t,
+ Mode: &m,
+ }
+ err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Error setting retention", err)
+ return
+ }
+
+ objectsIter := c.ListObjectsIter(context.Background(), bucketName, minio.ListObjectsOptions{
+ WithVersions: true,
+ Recursive: true,
+ })
+ results, err := c.RemoveObjectsWithIter(context.Background(), bucketName, objectsIter, minio.RemoveObjectsOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Error sending delete request", err)
+ return
+ }
+ for result := range results {
+ if result.Err != nil {
+ // Error is expected here because Retention is set on the object
+ // and RemoveObjects is called without Bypass Governance
+ break
+ }
+ logError(testName, function, args, startTime, "", "Expected error during deletion", nil)
+ return
+ }
+
+ objectsIter = c.ListObjectsIter(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true})
+ results, err = c.RemoveObjectsWithIter(context.Background(), bucketName, objectsIter, minio.RemoveObjectsOptions{
+ GovernanceBypass: true,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Error sending delete request", err)
+ return
+ }
+ for result := range results {
+ if result.Err != nil {
+ // Error is not expected here because Retention is set on the object
+ // and RemoveObjects is called with Bypass Governance
+ logError(testName, function, args, startTime, "", "Error detected during deletion", result.Err)
+ return
+ }
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
// Test get bucket tags
func testGetBucketTagging() {
// initialize logging params
@@ -13585,7 +13848,7 @@ func testGetBucketTagging() {
}
_, err = c.GetBucketTagging(context.Background(), bucketName)
- if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
+ if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet {
logError(testName, function, args, startTime, "", "Invalid error from server failed", err)
return
}
@@ -13627,7 +13890,7 @@ func testSetBucketTagging() {
}
_, err = c.GetBucketTagging(context.Background(), bucketName)
- if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
+ if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet {
logError(testName, function, args, startTime, "", "Invalid error from server", err)
return
}
@@ -13699,7 +13962,7 @@ func testRemoveBucketTagging() {
}
_, err = c.GetBucketTagging(context.Background(), bucketName)
- if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
+ if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet {
logError(testName, function, args, startTime, "", "Invalid error from server", err)
return
}
@@ -13740,7 +14003,7 @@ func testRemoveBucketTagging() {
}
_, err = c.GetBucketTagging(context.Background(), bucketName)
- if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
+ if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet {
logError(testName, function, args, startTime, "", "Invalid error from server", err)
return
}
@@ -13809,6 +14072,7 @@ func main() {
testPutMultipartObjectWithChecksums(false)
testPutMultipartObjectWithChecksums(true)
testPutObject0ByteV2()
+ testPutObjectMetadataNonUSASCIIV2()
testPutObjectNoLengthV2()
testPutObjectsUnknownV2()
testGetObjectContextV2()
@@ -13826,6 +14090,7 @@ func main() {
testGetObjectS3Zip()
testRemoveMultipleObjects()
testRemoveMultipleObjectsWithResult()
+ testRemoveMultipleObjectsIter()
testFPutObjectMultipart()
testFPutObject()
testGetObjectReadSeekFunctional()
@@ -13852,6 +14117,7 @@ func main() {
testPutObjectWithContentLanguage()
testListObjects()
testRemoveObjects()
+ testRemoveObjectsIter()
testListObjectVersions()
testStatObjectWithVersioning()
testGetObjectWithVersioning()
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
index 55636ad48..2f7993f4b 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
@@ -730,6 +730,8 @@ type Metrics struct {
Errors TimedErrStats `json:"failed,omitempty"`
// Total number of entries that are queued for replication
QStats InQueueMetric `json:"queued"`
+ // Total number of entries that have replication in progress
+ InProgress InProgressMetric `json:"inProgress"`
// Deprecated fields
// Total Pending size in bytes across targets
PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
@@ -830,6 +832,9 @@ type InQueueMetric struct {
Max QStat `json:"peak" msg:"pq"`
}
+// InProgressMetric holds stats for objects with replication in progress
+type InProgressMetric InQueueMetric
+
// MetricName name of replication metric
type MetricName string
@@ -849,6 +854,14 @@ type WorkerStat struct {
Max int32 `json:"max"`
}
+// TgtHealth holds health status of a target
+type TgtHealth struct {
+ Online bool `json:"online"`
+ LastOnline time.Time `json:"lastOnline"`
+ TotalDowntime time.Duration `json:"totalDowntime"`
+ OfflineCount int64 `json:"offlineCount"`
+}
+
// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes
// and number of entries that failed replication after 3 retries
type ReplMRFStats struct {
@@ -863,15 +876,18 @@ type ReplMRFStats struct {
type ReplQNodeStats struct {
NodeName string `json:"nodeName"`
Uptime int64 `json:"uptime"`
- Workers WorkerStat `json:"activeWorkers"`
+ Workers WorkerStat `json:"workers"`
XferStats map[MetricName]XferStats `json:"transferSummary"`
TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"`
- QStats InQueueMetric `json:"queueStats"`
- MRFStats ReplMRFStats `json:"mrfStats"`
- Retries CounterSummary `json:"retries"`
- Errors CounterSummary `json:"errors"`
+ QStats InQueueMetric `json:"queueStats"`
+ InProgressStats InProgressMetric `json:"progressStats"`
+
+ MRFStats ReplMRFStats `json:"mrfStats"`
+ Retries CounterSummary `json:"retries"`
+ Errors CounterSummary `json:"errors"`
+ TgtHealth map[string]TgtHealth `json:"tgtHealth,omitempty"`
}
// CounterSummary denotes the stats counter summary
@@ -918,6 +934,19 @@ func (q ReplQueueStats) qStatSummary() InQueueMetric {
return m
}
+// inProgressSummary returns cluster level stats for objects with replication in progress
+func (q ReplQueueStats) inProgressSummary() InProgressMetric {
+ m := InProgressMetric{}
+ for _, v := range q.Nodes {
+ m.Avg.Add(v.InProgressStats.Avg)
+ m.Curr.Add(v.InProgressStats.Curr)
+ if m.Max.Count < v.InProgressStats.Max.Count {
+ m.Max.Add(v.InProgressStats.Max)
+ }
+ }
+ return m
+}
+
// ReplQStats holds stats for objects in replication queue
type ReplQStats struct {
Uptime int64 `json:"uptime"`
@@ -926,7 +955,9 @@ type ReplQStats struct {
XferStats map[MetricName]XferStats `json:"xferStats"`
TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`
- QStats InQueueMetric `json:"qStats"`
+ QStats InQueueMetric `json:"qStats"`
+ InProgressStats InProgressMetric `json:"progressStats"`
+
MRFStats ReplMRFStats `json:"mrfStats"`
Retries CounterSummary `json:"retries"`
Errors CounterSummary `json:"errors"`
@@ -935,10 +966,10 @@ type ReplQStats struct {
// QStats returns cluster level stats for objects in replication queue
func (q ReplQueueStats) QStats() (r ReplQStats) {
r.QStats = q.qStatSummary()
+ r.InProgressStats = q.inProgressSummary()
r.XferStats = make(map[MetricName]XferStats)
r.TgtXferStats = make(map[string]map[MetricName]XferStats)
r.Workers = q.Workers()
-
for _, node := range q.Nodes {
for arn := range node.TgtXferStats {
xmap, ok := node.TgtXferStats[arn]
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go b/vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go
new file mode 100644
index 000000000..d6f674fac
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go
@@ -0,0 +1,73 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2025 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package utils
+
+import (
+ "bytes"
+ "errors"
+ "io"
+)
+
+// PeekReadCloser offers a way to peek a ReadCloser stream and then
+// return the exact stream of the underlying ReadCloser
+type PeekReadCloser struct {
+ io.ReadCloser
+
+ recordMode bool
+ recordMaxBuf int
+ recordBuf *bytes.Buffer
+}
+
+// ReplayFromStart ensures next Read() will restart to stream the
+// underlying ReadCloser stream from the beginning
+func (prc *PeekReadCloser) ReplayFromStart() {
+ prc.recordMode = false
+}
+
+func (prc *PeekReadCloser) Read(p []byte) (int, error) {
+ if prc.recordMode {
+ if prc.recordBuf.Len() > prc.recordMaxBuf {
+ return 0, errors.New("maximum peek buffer exceeded")
+ }
+ n, err := prc.ReadCloser.Read(p)
+ prc.recordBuf.Write(p[:n])
+ return n, err
+ }
+ // Replay mode
+ if prc.recordBuf.Len() > 0 {
+ pn, _ := prc.recordBuf.Read(p)
+ return pn, nil
+ }
+ return prc.ReadCloser.Read(p)
+}
+
+// Close releases the record buffer memory and closes the underlying ReadCloser
+func (prc *PeekReadCloser) Close() error {
+ prc.recordBuf.Reset()
+ return prc.ReadCloser.Close()
+}
+
+// NewPeekReadCloser returns a new peek reader
+func NewPeekReadCloser(rc io.ReadCloser, maxBufSize int) *PeekReadCloser {
+ return &PeekReadCloser{
+ ReadCloser: rc,
+ recordMode: true, // recording mode by default
+ recordBuf: bytes.NewBuffer(make([]byte, 0, 1024)),
+ recordMaxBuf: maxBufSize,
+ }
+}
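Note: a minimal standalone sketch of the peek-then-replay pattern this type enables (the same pattern tryParseErrRespFromBody in api.go relies on); the sample body and buffer size are illustrative:

package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/minio/minio-go/v7/pkg/utils"
)

func main() {
	// Pretend this is an HTTP response body that might carry an XML error.
	body := io.NopCloser(strings.NewReader("<Error><Code>InternalError</Code></Error>"))

	// Record up to 1 MiB of whatever is read while peeking.
	peeker := utils.NewPeekReadCloser(body, 1<<20)
	defer peeker.Close()

	// Peek: read (and record) a prefix of the stream.
	sniff := make([]byte, 7)
	if _, err := io.ReadFull(peeker, sniff); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("peeked: %q\n", sniff) // "<Error>"

	// Replay: subsequent reads start over from the recorded beginning.
	peeker.ReplayFromStart()
	full, _ := io.ReadAll(peeker)
	fmt.Printf("replayed body: %q\n", full)
}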
diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go
index 26bf441b5..e2c24b60a 100644
--- a/vendor/github.com/minio/minio-go/v7/post-policy.go
+++ b/vendor/github.com/minio/minio-go/v7/post-policy.go
@@ -161,7 +161,7 @@ func (p *PostPolicy) SetTagging(tagging string) error {
}
_, err := tags.ParseObjectXML(strings.NewReader(tagging))
if err != nil {
- return errors.New("The XML you provided was not well-formed or did not validate against our published schema.") //nolint
+ return errors.New(s3ErrorResponseMap[MalformedXML]) //nolint
}
policyCond := policyCondition{
matchType: "eq",
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
index b83d1b2e5..59c7a163d 100644
--- a/vendor/github.com/minio/minio-go/v7/retry.go
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -104,6 +104,8 @@ var retryableS3Codes = map[string]struct{}{
"ExpiredToken": {},
"ExpiredTokenException": {},
"SlowDown": {},
+ "SlowDownWrite": {},
+ "SlowDownRead": {},
// Add more AWS S3 codes here.
}
diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go
index f7fad19f6..4bcc47d80 100644
--- a/vendor/github.com/minio/minio-go/v7/s3-error.go
+++ b/vendor/github.com/minio/minio-go/v7/s3-error.go
@@ -17,46 +17,100 @@
package minio
+// Constants for error keys
+const (
+ NoSuchBucket = "NoSuchBucket"
+ NoSuchKey = "NoSuchKey"
+ NoSuchUpload = "NoSuchUpload"
+ AccessDenied = "AccessDenied"
+ Conflict = "Conflict"
+ PreconditionFailed = "PreconditionFailed"
+ InvalidArgument = "InvalidArgument"
+ EntityTooLarge = "EntityTooLarge"
+ EntityTooSmall = "EntityTooSmall"
+ UnexpectedEOF = "UnexpectedEOF"
+ APINotSupported = "APINotSupported"
+ InvalidRegion = "InvalidRegion"
+ NoSuchBucketPolicy = "NoSuchBucketPolicy"
+ BadDigest = "BadDigest"
+ IncompleteBody = "IncompleteBody"
+ InternalError = "InternalError"
+ InvalidAccessKeyID = "InvalidAccessKeyId"
+ InvalidBucketName = "InvalidBucketName"
+ InvalidDigest = "InvalidDigest"
+ InvalidRange = "InvalidRange"
+ MalformedXML = "MalformedXML"
+ MissingContentLength = "MissingContentLength"
+ MissingContentMD5 = "MissingContentMD5"
+ MissingRequestBodyError = "MissingRequestBodyError"
+ NotImplemented = "NotImplemented"
+ RequestTimeTooSkewed = "RequestTimeTooSkewed"
+ SignatureDoesNotMatch = "SignatureDoesNotMatch"
+ MethodNotAllowed = "MethodNotAllowed"
+ InvalidPart = "InvalidPart"
+ InvalidPartOrder = "InvalidPartOrder"
+ InvalidObjectState = "InvalidObjectState"
+ AuthorizationHeaderMalformed = "AuthorizationHeaderMalformed"
+ MalformedPOSTRequest = "MalformedPOSTRequest"
+ BucketNotEmpty = "BucketNotEmpty"
+ AllAccessDisabled = "AllAccessDisabled"
+ MalformedPolicy = "MalformedPolicy"
+ MissingFields = "MissingFields"
+ AuthorizationQueryParametersError = "AuthorizationQueryParametersError"
+ MalformedDate = "MalformedDate"
+ BucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
+ InvalidDuration = "InvalidDuration"
+ XAmzContentSHA256Mismatch = "XAmzContentSHA256Mismatch"
+ XMinioInvalidObjectName = "XMinioInvalidObjectName"
+ NoSuchCORSConfiguration = "NoSuchCORSConfiguration"
+ BucketAlreadyExists = "BucketAlreadyExists"
+ NoSuchVersion = "NoSuchVersion"
+ NoSuchTagSet = "NoSuchTagSet"
+ Testing = "Testing"
+ Success = "Success"
+)
+
// Non exhaustive list of AWS S3 standard error responses -
// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var s3ErrorResponseMap = map[string]string{
- "AccessDenied": "Access Denied.",
- "BadDigest": "The Content-Md5 you specified did not match what we received.",
- "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.",
- "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
- "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
- "InternalError": "We encountered an internal error, please try again.",
- "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.",
- "InvalidBucketName": "The specified bucket is not valid.",
- "InvalidDigest": "The Content-Md5 you specified is not valid.",
- "InvalidRange": "The requested range is not satisfiable",
- "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.",
- "MissingContentLength": "You must provide the Content-Length HTTP header.",
- "MissingContentMD5": "Missing required header for this request: Content-Md5.",
- "MissingRequestBodyError": "Request body is empty.",
- "NoSuchBucket": "The specified bucket does not exist.",
- "NoSuchBucketPolicy": "The bucket policy does not exist",
- "NoSuchKey": "The specified key does not exist.",
- "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
- "NotImplemented": "A header you provided implies functionality that is not implemented",
- "PreconditionFailed": "At least one of the pre-conditions you specified did not hold",
- "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.",
- "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
- "MethodNotAllowed": "The specified method is not allowed against this resource.",
- "InvalidPart": "One or more of the specified parts could not be found.",
- "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
- "InvalidObjectState": "The operation is not valid for the current state of the object.",
- "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.",
- "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.",
- "BucketNotEmpty": "The bucket you tried to delete is not empty",
- "AllAccessDisabled": "All access to this bucket has been disabled.",
- "MalformedPolicy": "Policy has invalid resource.",
- "MissingFields": "Missing fields in request.",
- "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
- "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
- "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.",
- "InvalidDuration": "Duration provided in the request is invalid.",
- "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.",
- "NoSuchCORSConfiguration": "The specified bucket does not have a CORS configuration.",
+ AccessDenied: "Access Denied.",
+ BadDigest: "The Content-Md5 you specified did not match what we received.",
+ EntityTooSmall: "Your proposed upload is smaller than the minimum allowed object size.",
+ EntityTooLarge: "Your proposed upload exceeds the maximum allowed object size.",
+ IncompleteBody: "You did not provide the number of bytes specified by the Content-Length HTTP header.",
+ InternalError: "We encountered an internal error, please try again.",
+ InvalidAccessKeyID: "The access key ID you provided does not exist in our records.",
+ InvalidBucketName: "The specified bucket is not valid.",
+ InvalidDigest: "The Content-Md5 you specified is not valid.",
+ InvalidRange: "The requested range is not satisfiable.",
+ MalformedXML: "The XML you provided was not well-formed or did not validate against our published schema.",
+ MissingContentLength: "You must provide the Content-Length HTTP header.",
+ MissingContentMD5: "Missing required header for this request: Content-Md5.",
+ MissingRequestBodyError: "Request body is empty.",
+ NoSuchBucket: "The specified bucket does not exist.",
+ NoSuchBucketPolicy: "The bucket policy does not exist.",
+ NoSuchKey: "The specified key does not exist.",
+ NoSuchUpload: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
+ NotImplemented: "A header you provided implies functionality that is not implemented.",
+ PreconditionFailed: "At least one of the pre-conditions you specified did not hold.",
+ RequestTimeTooSkewed: "The difference between the request time and the server's time is too large.",
+ SignatureDoesNotMatch: "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
+ MethodNotAllowed: "The specified method is not allowed against this resource.",
+ InvalidPart: "One or more of the specified parts could not be found.",
+ InvalidPartOrder: "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
+ InvalidObjectState: "The operation is not valid for the current state of the object.",
+ AuthorizationHeaderMalformed: "The authorization header is malformed; the region is wrong.",
+ MalformedPOSTRequest: "The body of your POST request is not well-formed multipart/form-data.",
+ BucketNotEmpty: "The bucket you tried to delete is not empty.",
+ AllAccessDisabled: "All access to this bucket has been disabled.",
+ MalformedPolicy: "Policy has invalid resource.",
+ MissingFields: "Missing fields in request.",
+ AuthorizationQueryParametersError: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
+ MalformedDate: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
+ BucketAlreadyOwnedByYou: "Your previous request to create the named bucket succeeded and you already own it.",
+ InvalidDuration: "Duration provided in the request is invalid.",
+ XAmzContentSHA256Mismatch: "The provided 'x-amz-content-sha256' header does not match what was computed.",
+ NoSuchCORSConfiguration: "The specified bucket does not have a CORS configuration.",
+ Conflict: "Bucket not empty.",
// Add new API errors here.
}
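
With the codes now exported as package-level constants, calling code can match on them instead of string literals. A hedged sketch follows: StatObject and ToErrorResponse are standard minio-go calls, while the helper name and the absent/denied handling are illustrative.

    package example

    import (
        "context"
        "errors"

        "github.com/minio/minio-go/v7"
    )

    // objectExists reports whether an object is present, comparing the
    // returned error code against the exported constants.
    func objectExists(ctx context.Context, c *minio.Client, bucket, key string) (bool, error) {
        _, err := c.StatObject(ctx, bucket, key, minio.StatObjectOptions{})
        if err == nil {
            return true, nil
        }
        switch minio.ToErrorResponse(err).Code {
        case minio.NoSuchKey, minio.NoSuchBucket:
            return false, nil // treat as absent rather than as a failure
        case minio.AccessDenied:
            return false, errors.New("insufficient permissions")
        }
        return false, err
    }
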
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
index 6024bfa5b..cc96005b9 100644
--- a/vendor/github.com/minio/minio-go/v7/utils.go
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -30,6 +30,7 @@ import (
"hash"
"io"
"math/rand"
+ "mime"
"net"
"net/http"
"net/url"
@@ -210,6 +211,7 @@ func extractObjMetadata(header http.Header) http.Header {
"X-Amz-Server-Side-Encryption",
"X-Amz-Tagging-Count",
"X-Amz-Meta-",
+ "X-Minio-Meta-",
// Add new headers to be preserved.
// if you add new headers here, please extend
// PutObjectOptions{} to preserve them
@@ -223,6 +225,16 @@ func extractObjMetadata(header http.Header) http.Header {
continue
}
found = true
+ if prefix == "X-Amz-Meta-" || prefix == "X-Minio-Meta-" {
+ for index, val := range v {
+ if strings.HasPrefix(val, "=?") {
+ decoder := mime.WordDecoder{}
+ if decoded, err := decoder.DecodeHeader(val); err == nil {
+ v[index] = decoded
+ }
+ }
+ }
+ }
break
}
if found {
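
The new branch above decodes RFC 2047 encoded-word values (those beginning with "=?") in X-Amz-Meta-* and X-Minio-Meta-* headers. A standalone sketch of the same standard-library call; the sample header value is illustrative:

    package main

    import (
        "fmt"
        "mime"
    )

    func main() {
        raw := "=?UTF-8?q?r=C3=A9sum=C3=A9.pdf?=" // what a server might return in an X-Amz-Meta-* header
        dec := mime.WordDecoder{}
        if decoded, err := dec.DecodeHeader(raw); err == nil {
            fmt.Println(decoded) // résumé.pdf
        }
    }
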
@@ -268,7 +280,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
if err != nil {
// Content-Length is not valid
return ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
+ Code: InternalError,
Message: fmt.Sprintf("Content-Length is not an integer, failed with %v", err),
BucketName: bucketName,
Key: objectName,
@@ -283,7 +295,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
mtime, err := parseRFC7231Time(h.Get("Last-Modified"))
if err != nil {
return ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
+ Code: InternalError,
Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err),
BucketName: bucketName,
Key: objectName,
@@ -305,7 +317,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
expiry, err = parseRFC7231Time(expiryStr)
if err != nil {
return ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
+ Code: InternalError,
Message: fmt.Sprintf("'Expiry' is not in supported format: %v", err),
BucketName: bucketName,
Key: objectName,
@@ -327,7 +339,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
userTags, err := tags.ParseObjectTags(h.Get(amzTaggingHeader))
if err != nil {
return ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
+ Code: InternalError,
}
}
@@ -336,7 +348,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
tagCount, err = strconv.Atoi(count)
if err != nil {
return ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
+ Code: InternalError,
Message: fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err),
BucketName: bucketName,
Key: objectName,