From c00cad2cebcb8136a998f6f7ba2c27672f785d10 Mon Sep 17 00:00:00 2001 From: kim Date: Tue, 22 Jul 2025 18:00:27 +0200 Subject: [chore] bump dependencies (#4339) - github.com/KimMachineGun/automemlimit v0.7.4 - github.com/miekg/dns v1.1.67 - github.com/minio/minio-go/v7 v7.0.95 - github.com/spf13/pflag v1.0.7 - github.com/tdewolff/minify/v2 v2.23.9 - github.com/uptrace/bun v1.2.15 - github.com/uptrace/bun/dialect/pgdialect v1.2.15 - github.com/uptrace/bun/dialect/sqlitedialect v1.2.15 - github.com/uptrace/bun/extra/bunotel v1.2.15 - golang.org/x/image v0.29.0 - golang.org/x/net v0.42.0 Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4339 Co-authored-by: kim Co-committed-by: kim --- .../minio/minio-go/v7/api-append-object.go | 8 +- .../minio/minio-go/v7/api-put-object-multipart.go | 10 +- .../minio/minio-go/v7/api-put-object-streaming.go | 68 +++---- .../github.com/minio/minio-go/v7/api-put-object.go | 48 +++-- vendor/github.com/minio/minio-go/v7/api.go | 7 +- vendor/github.com/minio/minio-go/v7/checksum.go | 26 ++- .../minio/minio-go/v7/functional_tests.go | 215 ++++++++++++++++++--- vendor/github.com/minio/minio-go/v7/hook-reader.go | 2 +- .../v7/pkg/credentials/sts_tls_identity.go | 5 +- .../minio-go/v7/pkg/peeker/peek-reader-closer.go | 73 +++++++ .../minio-go/v7/pkg/utils/peek-reader-closer.go | 73 ------- vendor/github.com/minio/minio-go/v7/post-policy.go | 2 +- 12 files changed, 358 insertions(+), 179 deletions(-) create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/peeker/peek-reader-closer.go delete mode 100644 vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go (limited to 'vendor/github.com/minio/minio-go') diff --git a/vendor/github.com/minio/minio-go/v7/api-append-object.go b/vendor/github.com/minio/minio-go/v7/api-append-object.go index fca08c373..b1bddf986 100644 --- a/vendor/github.com/minio/minio-go/v7/api-append-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-append-object.go @@ -127,6 +127,10 @@ func (c *Client) appendObjectDo(ctx context.Context, bucketName, objectName stri if opts.checksumType.IsSet() { reqMetadata.addCrc = &opts.checksumType + reqMetadata.customHeader.Set(amzChecksumAlgo, opts.checksumType.String()) + if opts.checksumType.FullObjectRequested() { + reqMetadata.customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String()) + } } // Execute PUT an objectName. @@ -183,8 +187,8 @@ func (c *Client) AppendObject(ctx context.Context, bucketName, objectName string if err != nil { return UploadInfo{}, err } - if oinfo.ChecksumMode != ChecksumFullObjectMode.String() { - return UploadInfo{}, fmt.Errorf("append API is not allowed on objects that are not full_object checksum type: %s", oinfo.ChecksumMode) + if oinfo.ChecksumMode != "" && oinfo.ChecksumMode != ChecksumFullObjectMode.String() { + return UploadInfo{}, fmt.Errorf("Append() is not allowed on objects that are not of FULL_OBJECT checksum type: %s", oinfo.ChecksumMode) } opts.setChecksumParams(oinfo) // set the appropriate checksum params based on the existing object checksum metadata. opts.setWriteOffset(oinfo.Size) // First append must set the current object size as the offset. 
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go index 844172324..6a3e9f092 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -82,16 +82,12 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj // avoid sha256 with non-v4 signature request or // HTTPS connection. hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256) - if len(hashSums) == 0 { - addAutoChecksumHeaders(&opts) - } // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } - delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") defer func() { if err != nil { @@ -145,11 +141,15 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj if hashSums["sha256"] != nil { sha256Hex = hex.EncodeToString(hashSums["sha256"]) } - if len(hashSums) == 0 { + if opts.AutoChecksum.IsSet() { crc.Reset() crc.Write(buf[:length]) cSum := crc.Sum(nil) customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(amzChecksumAlgo, opts.AutoChecksum.String()) + if opts.AutoChecksum.FullObjectRequested() { + customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String()) + } } p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index 4a7243edc..db5314d5f 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -108,19 +108,14 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN if err != nil { return UploadInfo{}, err } - if opts.Checksum.IsSet() { - opts.AutoChecksum = opts.Checksum - } - withChecksum := c.trailingHeaderSupport - if withChecksum { - addAutoChecksumHeaders(&opts) - } + // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } - delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") + + withChecksum := c.trailingHeaderSupport // Aborts the multipart upload in progress, if the // function returns any error, since we do not resume @@ -297,15 +292,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b return UploadInfo{}, err } - if opts.Checksum.IsSet() { - opts.AutoChecksum = opts.Checksum - opts.SendContentMd5 = false - } - - if !opts.SendContentMd5 { - addAutoChecksumHeaders(&opts) - } - // Calculate the optimal parts info for a given size. 
totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) if err != nil { @@ -316,7 +302,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b if err != nil { return UploadInfo{}, err } - delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") // Aborts the multipart upload if the function returns // any error, since we do not resume we should purge @@ -369,12 +354,18 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b md5Hash.Reset() md5Hash.Write(buf[:length]) md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil)) - } else { + } + + if opts.AutoChecksum.IsSet() { // Add CRC32C instead. crc.Reset() crc.Write(buf[:length]) cSum := crc.Sum(nil) - customHeader.Set(opts.AutoChecksum.KeyCapitalized(), base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(amzChecksumAlgo, opts.AutoChecksum.String()) + if opts.AutoChecksum.FullObjectRequested() { + customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String()) + } } // Update progress reader appropriately to the latest offset @@ -453,13 +444,6 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam if err = s3utils.CheckValidObjectName(objectName); err != nil { return UploadInfo{}, err } - if opts.Checksum.IsSet() { - opts.SendContentMd5 = false - opts.AutoChecksum = opts.Checksum - } - if !opts.SendContentMd5 { - addAutoChecksumHeaders(&opts) - } // Cancel all when an error occurs. ctx, cancel := context.WithCancel(ctx) @@ -476,7 +460,6 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam if err != nil { return UploadInfo{}, err } - delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") // Aborts the multipart upload if the function returns // any error, since we do not resume we should purge @@ -541,18 +524,22 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam return UploadInfo{}, rerr } - // Calculate md5sum. - customHeader := make(http.Header) - if !opts.SendContentMd5 { - // Add Checksum instead. - crc.Reset() - crc.Write(buf[:length]) - cSum := crc.Sum(nil) - customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) - } - wg.Add(1) go func(partNumber int) { + // Calculate md5sum. + customHeader := make(http.Header) + if opts.AutoChecksum.IsSet() { + // Add Checksum instead. 
+ crc.Reset() + crc.Write(buf[:length]) + cSum := crc.Sum(nil) + customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(amzChecksumAlgo, opts.AutoChecksum.String()) + if opts.AutoChecksum.FullObjectRequested() { + customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String()) + } + } + // Avoid declaring variables in the for loop var md5Base64 string @@ -664,9 +651,6 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 { return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'") } - if opts.Checksum.IsSet() { - opts.SendContentMd5 = false - } var readSeeker io.Seeker if size > 0 { @@ -759,7 +743,7 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, } } if addCrc { - opts.AutoChecksum.SetDefault(ChecksumCRC32C) + opts.AutoChecksum.SetDefault(ChecksumFullObjectCRC32C) reqMetadata.addCrc = &opts.AutoChecksum } } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index ce4834790..877cecb84 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -26,6 +26,7 @@ import ( "io" "net/http" "sort" + "strings" "time" "github.com/minio/minio-go/v7/pkg/encrypt" @@ -267,7 +268,16 @@ func (opts PutObjectOptions) validate(c *Client) (err error) { if opts.LegalHold != "" && !opts.LegalHold.IsValid() { return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status") } - if opts.Checksum.IsSet() { + + checkCrc := false + for k := range opts.UserMetadata { + if strings.HasPrefix(k, "x-amz-checksum-") { + checkCrc = true + break + } + } + + if opts.Checksum.IsSet() || checkCrc { switch { case !c.trailingHeaderSupport: return errInvalidArgument("Checksum requires Client with TrailingHeaders enabled") @@ -307,10 +317,10 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part // be reused for best outcomes for PutObject(), pass the size always. // // NOTE: Upon errors during upload multipart operation is entirely aborted. -func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, +func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions, ) (info UploadInfo, err error) { - if objectSize < 0 && opts.DisableMultipart { + if size < 0 && opts.DisableMultipart { return UploadInfo{}, errors.New("object size must be provided with disable multipart upload") } @@ -319,15 +329,20 @@ func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, r return UploadInfo{}, err } - return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) -} - -func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { // Check for largest object size allowed. 
if size > int64(maxMultipartPutObjectSize) { return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) } - opts.AutoChecksum.SetDefault(ChecksumCRC32C) + + if opts.Checksum.IsSet() { + opts.AutoChecksum = opts.Checksum + opts.SendContentMd5 = false + } + + if c.trailingHeaderSupport { + opts.AutoChecksum.SetDefault(ChecksumCRC32C) + addAutoChecksumHeaders(&opts) + } // NOTE: Streaming signature is not supported by GCS. if s3utils.IsGoogleEndpoint(*c.endpointURL) { @@ -385,20 +400,11 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam return UploadInfo{}, err } - if opts.Checksum.IsSet() { - opts.SendContentMd5 = false - opts.AutoChecksum = opts.Checksum - } - if !opts.SendContentMd5 { - addAutoChecksumHeaders(&opts) - } - // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } - delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") defer func() { if err != nil { @@ -437,11 +443,17 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam hash.Write(buf[:length]) md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) hash.Close() - } else { + } + + if opts.AutoChecksum.IsSet() { crc.Reset() crc.Write(buf[:length]) cSum := crc.Sum(nil) customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(amzChecksumAlgo, opts.AutoChecksum.String()) + if opts.AutoChecksum.FullObjectRequested() { + customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String()) + } } // Update progress reader appropriately to the latest offset diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 10a12ccfa..53ef6b85a 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -43,12 +43,11 @@ import ( md5simd "github.com/minio/md5-simd" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/kvcache" + "github.com/minio/minio-go/v7/pkg/peeker" "github.com/minio/minio-go/v7/pkg/s3utils" "github.com/minio/minio-go/v7/pkg/signer" "github.com/minio/minio-go/v7/pkg/singleflight" "golang.org/x/net/publicsuffix" - - internalutils "github.com/minio/minio-go/v7/pkg/utils" ) // Client implements Amazon S3 compatible methods. @@ -163,7 +162,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.94" + libraryVersion = "v7.0.95" ) // User Agent should always following the below style. 
@@ -625,7 +624,7 @@ func (c *Client) do(req *http.Request) (resp *http.Response, err error) { // - Return the error XML bytes if an error is found // - Make sure to always restablish the whole http response stream before returning func tryParseErrRespFromBody(resp *http.Response) ([]byte, error) { - peeker := internalutils.NewPeekReadCloser(resp.Body, 5*humanize.MiByte) + peeker := peeker.NewPeekReadCloser(resp.Body, 5*humanize.MiByte) defer func() { peeker.ReplayFromStart() resp.Body = peeker diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go index 2fd94b5e0..0691c1fbb 100644 --- a/vendor/github.com/minio/minio-go/v7/checksum.go +++ b/vendor/github.com/minio/minio-go/v7/checksum.go @@ -29,6 +29,7 @@ import ( "math/bits" "net/http" "sort" + "strings" "github.com/minio/crc64nvme" ) @@ -432,9 +433,19 @@ func addAutoChecksumHeaders(opts *PutObjectOptions) { if opts.UserMetadata == nil { opts.UserMetadata = make(map[string]string, 1) } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() - if opts.AutoChecksum.FullObjectRequested() { - opts.UserMetadata[amzChecksumMode] = ChecksumFullObjectMode.String() + + addChecksum := true + for k := range opts.UserMetadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") { + addChecksum = false + } + } + + if addChecksum && opts.AutoChecksum.IsSet() { + opts.UserMetadata[amzChecksumAlgo] = opts.AutoChecksum.String() + if opts.AutoChecksum.FullObjectRequested() { + opts.UserMetadata[amzChecksumMode] = ChecksumFullObjectMode.String() + } } } @@ -446,14 +457,17 @@ func applyAutoChecksum(opts *PutObjectOptions, allParts []ObjectPart) { // Add composite hash of hashes. crc, err := opts.AutoChecksum.CompositeChecksum(allParts) if err == nil { - opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): crc.Encoded()} + opts.UserMetadata = map[string]string{ + opts.AutoChecksum.Key(): crc.Encoded(), + amzChecksumMode: ChecksumCompositeMode.String(), + } } } else if opts.AutoChecksum.CanMergeCRC() { crc, err := opts.AutoChecksum.FullObjectChecksum(allParts) if err == nil { opts.UserMetadata = map[string]string{ - opts.AutoChecksum.KeyCapitalized(): crc.Encoded(), - amzChecksumMode: ChecksumFullObjectMode.String(), + opts.AutoChecksum.Key(): crc.Encoded(), + amzChecksumMode: ChecksumFullObjectMode.String(), } } } diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index 97c6930fb..3ade9a6af 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -1970,7 +1970,7 @@ func testPutObjectWithChecksums() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size, opts)" + function := "PutObject(bucketName, objectName, reader, size, opts)" args := map[string]interface{}{ "bucketName": "", "objectName": "", @@ -1982,7 +1982,7 @@ func testPutObjectWithChecksums() { return } - c, err := NewClient(ClientConfig{}) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return @@ -2037,6 +2037,10 @@ func testPutObjectWithChecksums() { h := test.cs.Hasher() h.Reset() + if test.cs.IsSet() { + meta["x-amz-checksum-algorithm"] = test.cs.String() + } + // Test with a bad CRC - we haven't called h.Write(b), so this is a checksum of empty 
data meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) args["metadata"] = meta @@ -2323,7 +2327,7 @@ func testPutObjectWithTrailingChecksums() { } // Test PutObject with custom checksums. -func testPutMultipartObjectWithChecksums(trailing bool) { +func testPutMultipartObjectWithChecksums() { // initialize logging params startTime := time.Now() testName := getFuncName() @@ -2331,7 +2335,7 @@ func testPutMultipartObjectWithChecksums(trailing bool) { args := map[string]interface{}{ "bucketName": "", "objectName": "", - "opts": fmt.Sprintf("minio.PutObjectOptions{UserMetadata: metadata, Trailing: %v}", trailing), + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Trailing: true}", } if !isFullMode() { @@ -2339,7 +2343,7 @@ func testPutMultipartObjectWithChecksums(trailing bool) { return } - c, err := NewClient(ClientConfig{TrailingHeaders: trailing}) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return @@ -2433,12 +2437,8 @@ func testPutMultipartObjectWithChecksums(trailing bool) { h.Reset() want := hashMultiPart(b, partSize, test.cs) - var cs minio.ChecksumType - rd := io.Reader(io.NopCloser(bytes.NewReader(b))) - if trailing { - cs = test.cs - rd = bytes.NewReader(b) - } + rd := bytes.NewReader(b) + cs := test.cs // Set correct CRC. args["section"] = "PutObject" @@ -2447,7 +2447,6 @@ func testPutMultipartObjectWithChecksums(trailing bool) { DisableMultipart: false, UserMetadata: nil, PartSize: partSize, - AutoChecksum: test.cs, Checksum: cs, }) if err != nil { @@ -2589,11 +2588,10 @@ func testTrailingChecksums() { return } - hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string { + hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) (oparts []minio.ObjectPart) { r := bytes.NewReader(b) tmp := make([]byte, partSize) parts := 0 - var all []byte for { n, err := io.ReadFull(r, tmp) if err != nil && err != io.ErrUnexpectedEOF { @@ -2605,14 +2603,16 @@ func testTrailingChecksums() { parts++ hasher.Reset() hasher.Write(tmp[:n]) - all = append(all, hasher.Sum(nil)...) 
+ oparts = append(oparts, minio.ObjectPart{ + PartNumber: parts, + Size: int64(n), + ChecksumCRC32C: base64.StdEncoding.EncodeToString(hasher.Sum(nil)), + }) if err != nil { break } } - hasher.Reset() - hasher.Write(all) - return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts) + return oparts } defer cleanupBucket(bucketName, c) tests := []struct { @@ -2636,6 +2636,7 @@ func testTrailingChecksums() { DisableMultipart: false, UserMetadata: nil, PartSize: 5 << 20, + Checksum: minio.ChecksumFullObjectCRC32C, }, }, { @@ -2647,6 +2648,7 @@ func testTrailingChecksums() { DisableMultipart: false, UserMetadata: nil, PartSize: 6_645_654, // Rather arbitrary size + Checksum: minio.ChecksumFullObjectCRC32C, }, }, { @@ -2658,6 +2660,7 @@ func testTrailingChecksums() { DisableMultipart: false, UserMetadata: nil, PartSize: 5 << 20, + Checksum: minio.ChecksumFullObjectCRC32C, }, }, { @@ -2669,6 +2672,7 @@ func testTrailingChecksums() { DisableMultipart: false, UserMetadata: nil, PartSize: 6_645_654, // Rather arbitrary size + Checksum: minio.ChecksumFullObjectCRC32C, }, }, } @@ -2696,7 +2700,14 @@ func testTrailingChecksums() { reader.Close() h := test.hasher h.Reset() - test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher) + + parts := hashMultiPart(b, int(test.PO.PartSize), test.hasher) + cksum, err := minio.ChecksumFullObjectCRC32C.FullObjectChecksum(parts) + if err != nil { + logError(testName, function, args, startTime, "", "checksum calculation failed", err) + return + } + test.ChecksumCRC32C = cksum.Encoded() // Set correct CRC. resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO) @@ -4172,7 +4183,7 @@ func testFPutObjectMultipart() { "opts": "", } - c, err := NewClient(ClientConfig{}) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return @@ -5586,6 +5597,161 @@ func testPresignedPostPolicyWrongFile() { logSuccess(testName, function, args, startTime) } +// testPresignedPostPolicyEmptyFileName tests that an empty file name in the presigned post policy +func testPresignedPostPolicyEmptyFileName() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PresignedPostPolicy(policy)" + args := map[string]interface{}{ + "policy": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. 
+ reader := getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + // Azure requires the key to not start with a number + metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") + metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + policy := minio.NewPostPolicy() + policy.SetBucket(bucketName) + policy.SetKey(objectName) + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days + policy.SetContentType("binary/octet-stream") + policy.SetContentLengthRange(10, 1024*1024) + policy.SetUserMetadata(metadataKey, metadataValue) + policy.SetContentEncoding("gzip") + + // Add CRC32C + checksum := minio.ChecksumCRC32C.ChecksumBytes(buf) + err = policy.SetChecksum(checksum) + if err != nil { + logError(testName, function, args, startTime, "", "SetChecksum failed", err) + return + } + + args["policy"] = policy.String() + + presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) + return + } + + var formBuf bytes.Buffer + writer := multipart.NewWriter(&formBuf) + for k, v := range formData { + writer.WriteField(k, v) + } + + // Get a 33KB file to upload and test if set post policy works + filePath := getMintDataDirFilePath("datafile-33-kB") + if filePath == "" { + // Make a temp file with 33 KB data. + file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + filePath = file.Name() + } + + // add file to post request + f, err := os.Open(filePath) + defer f.Close() + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + w, err := writer.CreateFormFile("", filePath) + if err != nil { + logError(testName, function, args, startTime, "", "CreateFormFile failed", err) + return + } + + _, err = io.Copy(w, f) + if err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + writer.Close() + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. 
+ Timeout: 30 * time.Second, + Transport: createHTTPTransport(), + } + args["url"] = presignedPostPolicyURL.String() + + req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // make post request with correct form data + res, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusBadRequest { + logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) + return + } + + body, err := io.ReadAll(res.Body) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !strings.Contains(string(body), "MalformedPOSTRequest") { + logError(testName, function, args, startTime, "", "Invalid error from server", errors.New(string(body))) + } + + logSuccess(testName, function, args, startTime) +} + // Tests copy object func testCopyObject() { // initialize logging params @@ -11560,8 +11726,11 @@ func testPutObjectMetadataNonUSASCIIV2() { } for k, v := range metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") { + continue + } if st.Metadata.Get(http.CanonicalHeaderKey("X-Amz-Meta-"+k)) != v { - logError(testName, function, args, startTime, "", "Expected upload object metadata "+k+": "+v+" but got "+st.Metadata.Get("X-Amz-Meta-"+k), err) + logError(testName, function, args, startTime, "", "Expected upload object metadata "+k+": "+v+" but got "+st.Metadata.Get(http.CanonicalHeaderKey("X-Amz-Meta-"+k)), err) return } } @@ -14069,8 +14238,7 @@ func main() { testUserMetadataCopyingV2() testPutObjectWithChecksums() testPutObjectWithTrailingChecksums() - testPutMultipartObjectWithChecksums(false) - testPutMultipartObjectWithChecksums(true) + testPutMultipartObjectWithChecksums() testPutObject0ByteV2() testPutObjectMetadataNonUSASCIIV2() testPutObjectNoLengthV2() @@ -14098,6 +14266,7 @@ func main() { testGetObjectReadAtWhenEOFWasReached() testPresignedPostPolicy() testPresignedPostPolicyWrongFile() + testPresignedPostPolicyEmptyFileName() testCopyObject() testComposeObjectErrorCases() testCompose10KSources() diff --git a/vendor/github.com/minio/minio-go/v7/hook-reader.go b/vendor/github.com/minio/minio-go/v7/hook-reader.go index 61268a104..06dbbb74d 100644 --- a/vendor/github.com/minio/minio-go/v7/hook-reader.go +++ b/vendor/github.com/minio/minio-go/v7/hook-reader.go @@ -84,7 +84,7 @@ func (hr *hookReader) Read(b []byte) (n int, err error) { // reports the data read from the source to the hook. 
func newHook(source, hook io.Reader) io.Reader { if hook == nil { - return source + return &hookReader{source: source} } return &hookReader{ source: source, diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go index 2a35a51a4..beab4a6a6 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go @@ -125,6 +125,7 @@ func (i *STSCertificateIdentity) RetrieveWithCredContext(cc *CredContext) (Value queryValues := url.Values{} queryValues.Set("Action", "AssumeRoleWithCertificate") queryValues.Set("Version", STSVersion) + queryValues.Set("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10)) if i.TokenRevokeType != "" { queryValues.Set("TokenRevokeType", i.TokenRevokeType) } @@ -134,10 +135,6 @@ func (i *STSCertificateIdentity) RetrieveWithCredContext(cc *CredContext) (Value if err != nil { return Value{}, err } - if req.Form == nil { - req.Form = url.Values{} - } - req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10)) client := i.Client if client == nil { diff --git a/vendor/github.com/minio/minio-go/v7/pkg/peeker/peek-reader-closer.go b/vendor/github.com/minio/minio-go/v7/pkg/peeker/peek-reader-closer.go new file mode 100644 index 000000000..26c9cf637 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/peeker/peek-reader-closer.go @@ -0,0 +1,73 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2025 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package peeker + +import ( + "bytes" + "errors" + "io" +) + +// PeekReadCloser offers a way to peek a ReadCloser stream and then +// return the exact stream of the underlying ReadCloser +type PeekReadCloser struct { + io.ReadCloser + + recordMode bool + recordMaxBuf int + recordBuf *bytes.Buffer +} + +// ReplayFromStart ensures next Read() will restart to stream the +// underlying ReadCloser stream from the beginning +func (prc *PeekReadCloser) ReplayFromStart() { + prc.recordMode = false +} + +func (prc *PeekReadCloser) Read(p []byte) (int, error) { + if prc.recordMode { + if prc.recordBuf.Len() > prc.recordMaxBuf { + return 0, errors.New("maximum peek buffer exceeded") + } + n, err := prc.ReadCloser.Read(p) + prc.recordBuf.Write(p[:n]) + return n, err + } + // Replay mode + if prc.recordBuf.Len() > 0 { + pn, _ := prc.recordBuf.Read(p) + return pn, nil + } + return prc.ReadCloser.Read(p) +} + +// Close releases the record buffer memory and close the underlying ReadCloser +func (prc *PeekReadCloser) Close() error { + prc.recordBuf.Reset() + return prc.ReadCloser.Close() +} + +// NewPeekReadCloser returns a new peek reader +func NewPeekReadCloser(rc io.ReadCloser, maxBufSize int) *PeekReadCloser { + return &PeekReadCloser{ + ReadCloser: rc, + recordMode: true, // recording mode by default + recordBuf: bytes.NewBuffer(make([]byte, 0, 1024)), + recordMaxBuf: maxBufSize, + } +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go b/vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go deleted file mode 100644 index d6f674fac..000000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2025 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package utils - -import ( - "bytes" - "errors" - "io" -) - -// PeekReadCloser offers a way to peek a ReadCloser stream and then -// return the exact stream of the underlying ReadCloser -type PeekReadCloser struct { - io.ReadCloser - - recordMode bool - recordMaxBuf int - recordBuf *bytes.Buffer -} - -// ReplayFromStart ensures next Read() will restart to stream the -// underlying ReadCloser stream from the beginning -func (prc *PeekReadCloser) ReplayFromStart() { - prc.recordMode = false -} - -func (prc *PeekReadCloser) Read(p []byte) (int, error) { - if prc.recordMode { - if prc.recordBuf.Len() > prc.recordMaxBuf { - return 0, errors.New("maximum peek buffer exceeded") - } - n, err := prc.ReadCloser.Read(p) - prc.recordBuf.Write(p[:n]) - return n, err - } - // Replay mode - if prc.recordBuf.Len() > 0 { - pn, _ := prc.recordBuf.Read(p) - return pn, nil - } - return prc.ReadCloser.Read(p) -} - -// Close releases the record buffer memory and close the underlying ReadCloser -func (prc *PeekReadCloser) Close() error { - prc.recordBuf.Reset() - return prc.ReadCloser.Close() -} - -// NewPeekReadCloser returns a new peek reader -func NewPeekReadCloser(rc io.ReadCloser, maxBufSize int) *PeekReadCloser { - return &PeekReadCloser{ - ReadCloser: rc, - recordMode: true, // recording mode by default - recordBuf: bytes.NewBuffer(make([]byte, 0, 1024)), - recordMaxBuf: maxBufSize, - } -} diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index e2c24b60a..d2899416c 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -417,7 +417,7 @@ func (p PostPolicy) String() string { // marshalJSON - Provides Marshaled JSON in bytes. func (p PostPolicy) marshalJSON() []byte { - expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` + expirationStr := `"expiration":"` + p.expiration.UTC().Format(expirationDateFormat) + `"` var conditionsStr string conditions := []string{} for _, po := range p.conditions { -- cgit v1.2.3
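The most self-contained change in this vendor bump is the move of the peek reader from the internal `pkg/utils` package to the exported `pkg/peeker` package, which the client uses in `tryParseErrRespFromBody` to sniff an error document and then replay the response body. Below is a minimal usage sketch based only on the `NewPeekReadCloser`, `ReplayFromStart`, and `Close` signatures shown in the added file; the sample payload and the 2 KiB record limit are illustrative assumptions, not values taken from the library.

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/minio/minio-go/v7/pkg/peeker"
)

func main() {
	// Wrap any io.ReadCloser; while in record mode, reads are buffered
	// up to the given limit (2 KiB here, an arbitrary choice).
	body := io.NopCloser(strings.NewReader("<Error><Code>NoSuchKey</Code></Error>"))
	prc := peeker.NewPeekReadCloser(body, 2<<10)

	// Peek at the start of the stream, e.g. to check for an XML error document.
	peek := make([]byte, 16)
	n, _ := prc.Read(peek)
	fmt.Printf("peeked: %q\n", peek[:n])

	// Rewind: subsequent reads first replay the recorded bytes, then
	// continue with the rest of the underlying stream.
	prc.ReplayFromStart()
	rest, _ := io.ReadAll(prc)
	fmt.Printf("full body: %q\n", rest)

	_ = prc.Close()
}
```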