author     kim <grufwub@gmail.com>  2025-04-28 11:20:24 +0000
committer  kim <gruf@noreply.codeberg.org>  2025-04-28 11:20:24 +0000
commit     436765a6a2ddf400b6e06c9a7c3283b1351fcbcd (patch)
tree       c1a0e969c23a74eaca258c709db1776763d576dd /vendor/github.com
parent     [chore] Update build to use new woodpecker dind container, bump version numbe... (diff)
download   gotosocial-436765a6a2ddf400b6e06c9a7c3283b1351fcbcd.tar.xz
bump dependencies: minio-go, go-sqlite3, goldmark, otel, x/image/webp (#4075)
Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4075
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/gin-contrib/cors/.golangci.yml  |  34
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-append-object.go  |  226
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-bucket-notification.go  |  9
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-datatypes.go  |  2
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go  |  1
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go  |  1
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go  |  1
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api.go  |  25
-rw-r--r--  vendor/github.com/minio/minio-go/v7/checksum.go  |  45
-rw-r--r--  vendor/github.com/minio/minio-go/v7/hook-reader.go  |  10
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go  |  25
-rw-r--r--  vendor/github.com/minio/minio-go/v7/retry-continous.go  |  26
-rw-r--r--  vendor/github.com/minio/minio-go/v7/retry.go  |  27
-rw-r--r--  vendor/github.com/minio/minio-go/v7/s3-endpoints.go  |  12
-rw-r--r--  vendor/github.com/minio/minio-go/v7/utils.go  |  1
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/driver/driver.go  |  43
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/embed/build.sh  |  4
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/embed/exports.txt  |  1
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/embed/sqlite3.wasm  |  bin 1397228 -> 1398689 bytes
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/error.go  |  2
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/stmt.go  |  13
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/util/sql3util/wasm/sql3parse_table.wasm  |  bin 17400 -> 15969 bytes
-rw-r--r--  vendor/github.com/yuin/goldmark/ast/block.go  |  1
-rw-r--r--  vendor/github.com/yuin/goldmark/parser/parser.go  |  2
24 files changed, 416 insertions, 95 deletions
diff --git a/vendor/github.com/gin-contrib/cors/.golangci.yml b/vendor/github.com/gin-contrib/cors/.golangci.yml
index 67edf0ac8..47094ac61 100644
--- a/vendor/github.com/gin-contrib/cors/.golangci.yml
+++ b/vendor/github.com/gin-contrib/cors/.golangci.yml
@@ -1,7 +1,6 @@
+version: "2"
linters:
- enable-all: false
- disable-all: true
- fast: false
+ default: none
enable:
- bodyclose
- dogsled
@@ -12,11 +11,8 @@ linters:
- goconst
- gocritic
- gocyclo
- - gofmt
- - goimports
- goprintffuncname
- gosec
- - gosimple
- govet
- ineffassign
- lll
@@ -26,13 +22,29 @@ linters:
- nolintlint
- rowserrcheck
- staticcheck
- - stylecheck
- - typecheck
- unconvert
- unparam
- unused
- whitespace
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
- gofumpt
-
-run:
- timeout: 3m
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/minio/minio-go/v7/api-append-object.go b/vendor/github.com/minio/minio-go/v7/api-append-object.go
new file mode 100644
index 000000000..fca08c373
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-append-object.go
@@ -0,0 +1,226 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2025 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// AppendObjectOptions https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-append.html
+type AppendObjectOptions struct {
+ // Provide a progress reader to indicate the current append() progress.
+ Progress io.Reader
+ // ChunkSize indicates the maximum append() size,
+ // it is useful when you want to control how much data
+ // per append() you are interested in sending to server
+ // while keeping the input io.Reader of a longer length.
+ ChunkSize uint64
+ // Aggressively disable sha256 payload, it is automatically
+ // turned-off for TLS supporting endpoints, useful in benchmarks
+ // where you are interested in the peak() numbers.
+ DisableContentSha256 bool
+
+ customHeaders http.Header
+ checksumType ChecksumType
+}
+
+// Header returns the custom header for AppendObject API
+func (opts AppendObjectOptions) Header() (header http.Header) {
+ header = make(http.Header)
+ for k, v := range opts.customHeaders {
+ header[k] = v
+ }
+ return header
+}
+
+func (opts *AppendObjectOptions) setWriteOffset(offset int64) {
+ if len(opts.customHeaders) == 0 {
+ opts.customHeaders = make(http.Header)
+ }
+ opts.customHeaders["x-amz-write-offset-bytes"] = []string{strconv.FormatInt(offset, 10)}
+}
+
+func (opts *AppendObjectOptions) setChecksumParams(info ObjectInfo) {
+ if len(opts.customHeaders) == 0 {
+ opts.customHeaders = make(http.Header)
+ }
+ fullObject := info.ChecksumMode == ChecksumFullObjectMode.String()
+ switch {
+ case info.ChecksumCRC32 != "":
+ if fullObject {
+ opts.checksumType = ChecksumFullObjectCRC32
+ }
+ case info.ChecksumCRC32C != "":
+ if fullObject {
+ opts.checksumType = ChecksumFullObjectCRC32C
+ }
+ case info.ChecksumCRC64NVME != "":
+ // CRC64NVME only has a full object variant
+ // so it does not carry any special full object
+ // modifier
+ opts.checksumType = ChecksumCRC64NVME
+ }
+}
+
+func (opts AppendObjectOptions) validate(c *Client) (err error) {
+ if opts.ChunkSize > maxPartSize {
+ return errInvalidArgument("Append chunkSize cannot be larger than max part size allowed")
+ }
+ switch {
+ case !c.trailingHeaderSupport:
+ return errInvalidArgument("AppendObject() requires Client with TrailingHeaders enabled")
+ case c.overrideSignerType.IsV2():
+ return errInvalidArgument("AppendObject() cannot be used with v2 signatures")
+ case s3utils.IsGoogleEndpoint(*c.endpointURL):
+ return errInvalidArgument("AppendObject() cannot be used with GCS endpoints")
+ }
+
+ return nil
+}
+
+// appendObjectDo - executes the append object http operation.
+// NOTE: You must have WRITE permissions on a bucket to add an object to it.
+func (c *Client) appendObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts AppendObjectOptions) (UploadInfo, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Set headers.
+ customHeader := opts.Header()
+
+ // Populate request metadata.
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeader,
+ contentBody: reader,
+ contentLength: size,
+ streamSha256: !opts.DisableContentSha256,
+ }
+
+ if opts.checksumType.IsSet() {
+ reqMetadata.addCrc = &opts.checksumType
+ }
+
+ // Execute PUT an objectName.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ h := resp.Header
+
+ // When AppendObject() is used, S3 Express will return final object size as x-amz-object-size
+ if amzSize := h.Get("x-amz-object-size"); amzSize != "" {
+ size, err = strconv.ParseInt(amzSize, 10, 64)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ }
+
+ return UploadInfo{
+ Bucket: bucketName,
+ Key: objectName,
+ ETag: trimEtag(h.Get("ETag")),
+ Size: size,
+
+ // Checksum values
+ ChecksumCRC32: h.Get(ChecksumCRC32.Key()),
+ ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()),
+ ChecksumSHA1: h.Get(ChecksumSHA1.Key()),
+ ChecksumSHA256: h.Get(ChecksumSHA256.Key()),
+ ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
+ ChecksumMode: h.Get(ChecksumFullObjectMode.Key()),
+ }, nil
+}
+
+// AppendObject - S3 Express Zone https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-append.html
+func (c *Client) AppendObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
+ opts AppendObjectOptions,
+) (info UploadInfo, err error) {
+ if objectSize < 0 && opts.ChunkSize == 0 {
+ return UploadInfo{}, errors.New("object size must be provided when no chunk size is provided")
+ }
+
+ if err = opts.validate(c); err != nil {
+ return UploadInfo{}, err
+ }
+
+ oinfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{Checksum: true})
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ if oinfo.ChecksumMode != ChecksumFullObjectMode.String() {
+ return UploadInfo{}, fmt.Errorf("append API is not allowed on objects that are not full_object checksum type: %s", oinfo.ChecksumMode)
+ }
+ opts.setChecksumParams(oinfo) // set the appropriate checksum params based on the existing object checksum metadata.
+ opts.setWriteOffset(oinfo.Size) // First append must set the current object size as the offset.
+
+ if opts.ChunkSize > 0 {
+ finalObjSize := int64(-1)
+ if objectSize > 0 {
+ finalObjSize = info.Size + objectSize
+ }
+ totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(finalObjSize, opts.ChunkSize)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ buf := make([]byte, partSize)
+ var partNumber int
+ for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+ // Proceed to upload the part.
+ if partNumber == totalPartsCount {
+ partSize = lastPartSize
+ }
+ n, err := readFull(reader, buf)
+ if err != nil {
+ return info, err
+ }
+ if n != int(partSize) {
+ return info, io.ErrUnexpectedEOF
+ }
+ rd := newHook(bytes.NewReader(buf[:n]), opts.Progress)
+ uinfo, err := c.appendObjectDo(ctx, bucketName, objectName, rd, partSize, opts)
+ if err != nil {
+ return info, err
+ }
+ opts.setWriteOffset(uinfo.Size)
+ }
+ }
+
+ rd := newHook(reader, opts.Progress)
+ return c.appendObjectDo(ctx, bucketName, objectName, rd, objectSize, opts)
+}
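
The new api-append-object.go above adds an AppendObject API for S3 Express (directory bucket) appends. A minimal usage sketch, not taken from this repository: the endpoint, bucket, object, and credentials below are placeholders, and the client must be built with TrailingHeaders enabled or validate() rejects the call.

    package main

    import (
    	"context"
    	"log"
    	"strings"

    	"github.com/minio/minio-go/v7"
    	"github.com/minio/minio-go/v7/pkg/credentials"
    )

    func main() {
    	// TrailingHeaders must be enabled; AppendObject() refuses to run without it
    	// (see opts.validate in the new file above).
    	client, err := minio.New("s3express-endpoint.example.com", &minio.Options{
    		Creds:           credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
    		Secure:          true,
    		TrailingHeaders: true,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}

    	data := strings.NewReader("bytes to append")
    	// The target object must already exist with a full-object checksum,
    	// otherwise AppendObject() returns an error after StatObject.
    	info, err := client.AppendObject(context.Background(),
    		"my-bucket--usw2-az1--x-s3", "my-object",
    		data, int64(data.Len()), minio.AppendObjectOptions{})
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("object size after append: %d", info.Size)
    }
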
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
index 33811b98f..b1e5b0aae 100644
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
@@ -157,13 +157,6 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
return
}
- // Continuously run and listen on bucket notification.
- // Create a done channel to control 'ListObjects' go routine.
- retryDoneCh := make(chan struct{}, 1)
-
- // Indicate to our routine to exit cleanly upon return.
- defer close(retryDoneCh)
-
// Prepare urlValues to pass into the request on every loop
urlValues := make(url.Values)
urlValues.Set("ping", "10")
@@ -172,7 +165,7 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
urlValues["events"] = events
// Wait on the jitter retry loop.
- for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
+ for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter) {
// Execute GET on bucket to list objects.
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
bucketName: bucketName,
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
index 2118c7c77..39ff9d27c 100644
--- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
@@ -148,6 +148,7 @@ type UploadInfo struct {
ChecksumSHA1 string
ChecksumSHA256 string
ChecksumCRC64NVME string
+ ChecksumMode string
}
// RestoreInfo contains information of the restore operation of an archived object
@@ -223,6 +224,7 @@ type ObjectInfo struct {
ChecksumSHA1 string
ChecksumSHA256 string
ChecksumCRC64NVME string
+ ChecksumMode string
Internal *struct {
K int // Data blocks
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
index 03bd34f76..84bc19b28 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
@@ -457,5 +457,6 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32,
ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
ChecksumCRC64NVME: completeMultipartUploadResult.ChecksumCRC64NVME,
+ ChecksumMode: completeMultipartUploadResult.ChecksumType,
}, nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
index 82c0ae9e4..987a3c692 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
@@ -805,5 +805,6 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
ChecksumSHA1: h.Get(ChecksumSHA1.Key()),
ChecksumSHA256: h.Get(ChecksumSHA256.Key()),
ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
+ ChecksumMode: h.Get(ChecksumFullObjectMode.Key()),
}, nil
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
index 08a5a7b6e..3204263dc 100644
--- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -366,6 +366,7 @@ type completeMultipartUploadResult struct {
ChecksumSHA1 string
ChecksumSHA256 string
ChecksumCRC64NVME string
+ ChecksumType string
}
// CompletePart sub container lists individual part numbers and their
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index b64f57615..39cd5fd53 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -155,7 +155,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "v7.0.89"
+ libraryVersion = "v7.0.91"
)
// User Agent should always following the below style.
@@ -660,13 +660,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil)))
}
- // Create cancel context to control 'newRetryTimer' go routine.
- retryCtx, cancel := context.WithCancel(ctx)
-
- // Indicate to our routine to exit cleanly upon return.
- defer cancel()
-
- for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
+ for range c.newRetryTimer(ctx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
// Retry executes the following function body if request has an
// error until maxRetries have been exhausted, retry attempts are
// performed after waiting for a given period of time in a
@@ -779,7 +773,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
}
// Return an error when retry is canceled or deadlined
- if e := retryCtx.Err(); e != nil {
+ if e := ctx.Err(); e != nil {
return nil, e
}
@@ -909,6 +903,11 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
// For anonymous requests just return.
if signerType.IsAnonymous() {
+ if len(metadata.trailer) > 0 {
+ req.Header.Set("X-Amz-Content-Sha256", unsignedPayloadTrailer)
+ return signer.UnsignedTrailer(*req, metadata.trailer), nil
+ }
+
return req, nil
}
@@ -1066,3 +1065,11 @@ func (c *Client) CredContext() *credentials.CredContext {
Endpoint: c.endpointURL.String(),
}
}
+
+// GetCreds returns the access creds for the client
+func (c *Client) GetCreds() (credentials.Value, error) {
+ if c.credsProvider == nil {
+ return credentials.Value{}, errors.New("no credentials provider")
+ }
+ return c.credsProvider.GetWithContext(c.CredContext())
+}
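
The api.go hunk above also adds an exported GetCreds helper. A small sketch of calling it, with placeholder endpoint and keys:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/minio/minio-go/v7"
    	"github.com/minio/minio-go/v7/pkg/credentials"
    )

    func main() {
    	client, err := minio.New("play.min.io", &minio.Options{
    		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
    		Secure: true,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}

    	// New in this bump: returns the resolved credentials.Value, or an error
    	// when the client has no credentials provider.
    	v, err := client.GetCreds()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("signing with access key:", v.AccessKeyID)
    }
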
diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go
index c7456cda2..5c24bf64a 100644
--- a/vendor/github.com/minio/minio-go/v7/checksum.go
+++ b/vendor/github.com/minio/minio-go/v7/checksum.go
@@ -34,6 +34,43 @@ import (
"github.com/minio/crc64nvme"
)
+// ChecksumMode contains information about the checksum mode on the object
+type ChecksumMode uint32
+
+const (
+ // ChecksumFullObjectMode Full object checksum `csumCombine(csum1, csum2...)...), csumN...)`
+ ChecksumFullObjectMode ChecksumMode = 1 << iota
+
+ // ChecksumCompositeMode Composite checksum `csum([csum1 + csum2 ... + csumN])`
+ ChecksumCompositeMode
+
+ // Keep after all valid checksums
+ checksumLastMode
+
+ // checksumModeMask is a mask for valid checksum mode types.
+ checksumModeMask = checksumLastMode - 1
+)
+
+// Is returns if c is all of t.
+func (c ChecksumMode) Is(t ChecksumMode) bool {
+ return c&t == t
+}
+
+// Key returns the header key.
+func (c ChecksumMode) Key() string {
+ return amzChecksumMode
+}
+
+func (c ChecksumMode) String() string {
+ switch c & checksumModeMask {
+ case ChecksumFullObjectMode:
+ return "FULL_OBJECT"
+ case ChecksumCompositeMode:
+ return "COMPOSITE"
+ }
+ return ""
+}
+
// ChecksumType contains information about the checksum type.
type ChecksumType uint32
@@ -75,6 +112,7 @@ const (
amzChecksumSHA1 = "x-amz-checksum-sha1"
amzChecksumSHA256 = "x-amz-checksum-sha256"
amzChecksumCRC64NVME = "x-amz-checksum-crc64nvme"
+ amzChecksumMode = "x-amz-checksum-type"
)
// Base returns the base type, without modifiers.
@@ -397,7 +435,7 @@ func addAutoChecksumHeaders(opts *PutObjectOptions) {
}
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
if opts.AutoChecksum.FullObjectRequested() {
- opts.UserMetadata["X-Amz-Checksum-Type"] = "FULL_OBJECT"
+ opts.UserMetadata[amzChecksumMode] = ChecksumFullObjectMode.String()
}
}
@@ -414,7 +452,10 @@ func applyAutoChecksum(opts *PutObjectOptions, allParts []ObjectPart) {
} else if opts.AutoChecksum.CanMergeCRC() {
crc, err := opts.AutoChecksum.FullObjectChecksum(allParts)
if err == nil {
- opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): crc.Encoded(), "X-Amz-Checksum-Type": "FULL_OBJECT"}
+ opts.UserMetadata = map[string]string{
+ opts.AutoChecksum.KeyCapitalized(): crc.Encoded(),
+ amzChecksumMode: ChecksumFullObjectMode.String(),
+ }
}
}
}
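
checksum.go now models the checksum mode (FULL_OBJECT vs COMPOSITE) as a ChecksumMode type, and UploadInfo/ObjectInfo gain a ChecksumMode field populated from x-amz-checksum-type. A hedged sketch of reading it back after an upload; PutObjectOptions.Checksum is assumed from the library's existing API, and the endpoint, keys, and bucket are placeholders:

    package main

    import (
    	"context"
    	"log"
    	"strings"

    	"github.com/minio/minio-go/v7"
    	"github.com/minio/minio-go/v7/pkg/credentials"
    )

    func main() {
    	client, err := minio.New("play.min.io", &minio.Options{
    		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
    		Secure: true,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}

    	body := strings.NewReader("hello")
    	info, err := client.PutObject(context.Background(), "my-bucket", "my-object",
    		body, int64(body.Len()), minio.PutObjectOptions{
    			// Ask for a full-object CRC32C; the server echoes the mode back.
    			Checksum: minio.ChecksumFullObjectCRC32C,
    		})
    	if err != nil {
    		log.Fatal(err)
    	}

    	// ChecksumMode is the new field filled from the x-amz-checksum-type header.
    	if info.ChecksumMode == minio.ChecksumFullObjectMode.String() {
    		log.Println("stored with FULL_OBJECT checksum:", info.ChecksumCRC32C)
    	}
    }
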
diff --git a/vendor/github.com/minio/minio-go/v7/hook-reader.go b/vendor/github.com/minio/minio-go/v7/hook-reader.go
index 07bc7dbcf..61268a104 100644
--- a/vendor/github.com/minio/minio-go/v7/hook-reader.go
+++ b/vendor/github.com/minio/minio-go/v7/hook-reader.go
@@ -20,7 +20,6 @@ package minio
import (
"fmt"
"io"
- "sync"
)
// hookReader hooks additional reader in the source stream. It is
@@ -28,7 +27,6 @@ import (
// notified about the exact number of bytes read from the primary
// source on each Read operation.
type hookReader struct {
- mu sync.RWMutex
source io.Reader
hook io.Reader
}
@@ -36,9 +34,6 @@ type hookReader struct {
// Seek implements io.Seeker. Seeks source first, and if necessary
// seeks hook if Seek method is appropriately found.
func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
- hr.mu.Lock()
- defer hr.mu.Unlock()
-
// Verify for source has embedded Seeker, use it.
sourceSeeker, ok := hr.source.(io.Seeker)
if ok {
@@ -70,9 +65,6 @@ func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
// value 'n' number of bytes are reported through the hook. Returns
// error for all non io.EOF conditions.
func (hr *hookReader) Read(b []byte) (n int, err error) {
- hr.mu.RLock()
- defer hr.mu.RUnlock()
-
n, err = hr.source.Read(b)
if err != nil && err != io.EOF {
return n, err
@@ -92,7 +84,7 @@ func (hr *hookReader) Read(b []byte) (n int, err error) {
// reports the data read from the source to the hook.
func newHook(source, hook io.Reader) io.Reader {
if hook == nil {
- return &hookReader{source: source}
+ return source
}
return &hookReader{
source: source,
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
index 09ece53a0..2842899b9 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
@@ -333,11 +333,34 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati
if len(trailer) > 0 {
// Use custom chunked encoding.
req.Trailer = trailer
- return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC())
+ return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, t)
}
return &req
}
+// UnsignedTrailer will do chunked encoding with a custom trailer.
+func UnsignedTrailer(req http.Request, trailer http.Header) *http.Request {
+ if len(trailer) == 0 {
+ return &req
+ }
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Set x-amz-date.
+ req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+
+ for k := range trailer {
+ req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
+ }
+
+ req.Header.Set("Content-Encoding", "aws-chunked")
+ req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
+
+ // Use custom chunked encoding.
+ req.Trailer = trailer
+ return StreamingUnsignedV4(&req, "", req.ContentLength, t)
+}
+
// SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go
index 81fcf16f1..21e9fd455 100644
--- a/vendor/github.com/minio/minio-go/v7/retry-continous.go
+++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go
@@ -17,12 +17,14 @@
package minio
-import "time"
+import (
+ "iter"
+ "math"
+ "time"
+)
// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
-func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
- attemptCh := make(chan int)
-
+func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitter float64) iter.Seq[int] {
// normalize jitter to the range [0, 1.0]
if jitter < NoJitter {
jitter = NoJitter
@@ -44,26 +46,20 @@ func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitte
if sleep > maxSleep {
sleep = maxSleep
}
- if jitter != NoJitter {
+ if math.Abs(jitter-NoJitter) > 1e-9 {
sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
}
return sleep
}
- go func() {
- defer close(attemptCh)
+ return func(yield func(int) bool) {
var nextBackoff int
for {
- select {
- // Attempts starts.
- case attemptCh <- nextBackoff:
- nextBackoff++
- case <-doneCh:
- // Stop the routine.
+ if !yield(nextBackoff) {
return
}
+ nextBackoff++
time.Sleep(exponentialBackoffWait(nextBackoff))
}
- }()
- return attemptCh
+ }
}
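
The retry-continous.go rewrite above drops the channel-plus-done-channel pattern in favour of a Go 1.23 iter.Seq push iterator. A minimal, self-contained illustration of that pattern; the names below are hypothetical and not part of minio-go:

    package main

    import (
    	"fmt"
    	"iter"
    	"time"
    )

    // attempts is a toy stand-in for newRetryTimer/newRetryTimerContinous: a push
    // iterator that yields attempt numbers and stops as soon as the caller breaks
    // out of the range loop (yield returns false), so no goroutine, attempt
    // channel, or done channel is needed.
    func attempts(n int, pause time.Duration) iter.Seq[int] {
    	return func(yield func(int) bool) {
    		for i := range n {
    			if !yield(i) {
    				return
    			}
    			time.Sleep(pause)
    		}
    	}
    }

    func main() {
    	for attempt := range attempts(3, 10*time.Millisecond) {
    		fmt.Println("attempt", attempt)
    		// Breaking here ends the iterator cleanly, replacing the old doneCh.
    	}
    }
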
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
index ed954017c..b83d1b2e5 100644
--- a/vendor/github.com/minio/minio-go/v7/retry.go
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -21,6 +21,8 @@ import (
"context"
"crypto/x509"
"errors"
+ "iter"
+ "math"
"net/http"
"net/url"
"time"
@@ -45,9 +47,7 @@ var DefaultRetryCap = time.Second
// newRetryTimer creates a timer with exponentially increasing
// delays until the maximum retry attempts are reached.
-func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, maxSleep time.Duration, jitter float64) <-chan int {
- attemptCh := make(chan int)
-
+func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, maxSleep time.Duration, jitter float64) iter.Seq[int] {
// computes the exponential backoff duration according to
// https://www.awsarchitectureblog.com/2015/03/backoff.html
exponentialBackoffWait := func(attempt int) time.Duration {
@@ -64,18 +64,22 @@ func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, max
if sleep > maxSleep {
sleep = maxSleep
}
- if jitter != NoJitter {
+ if math.Abs(jitter-NoJitter) > 1e-9 {
sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
}
return sleep
}
- go func() {
- defer close(attemptCh)
- for i := 0; i < maxRetry; i++ {
- select {
- case attemptCh <- i + 1:
- case <-ctx.Done():
+ return func(yield func(int) bool) {
+ // if context is already canceled, skip yield
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+
+ for i := range maxRetry {
+ if !yield(i) {
return
}
@@ -85,8 +89,7 @@ func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, max
return
}
}
- }()
- return attemptCh
+ }
}
// List of AWS S3 error codes which are retryable.
diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
index baab23e96..6928b8eb3 100644
--- a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
+++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
@@ -168,6 +168,18 @@ var awsS3EndpointMap = map[string]awsS3Endpoint{
"s3.il-central-1.amazonaws.com",
"s3.dualstack.il-central-1.amazonaws.com",
},
+ "ap-southeast-5": {
+ "s3.ap-southeast-5.amazonaws.com",
+ "s3.dualstack.ap-southeast-5.amazonaws.com",
+ },
+ "ap-southeast-7": {
+ "s3.ap-southeast-7.amazonaws.com",
+ "s3.dualstack.ap-southeast-7.amazonaws.com",
+ },
+ "mx-central-1": {
+ "s3.mx-central-1.amazonaws.com",
+ "s3.dualstack.mx-central-1.amazonaws.com",
+ },
}
// getS3Endpoint get Amazon S3 endpoint based on the bucket location.
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
index 027bb6ce3..6024bfa5b 100644
--- a/vendor/github.com/minio/minio-go/v7/utils.go
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -390,6 +390,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
ChecksumSHA1: h.Get(ChecksumSHA1.Key()),
ChecksumSHA256: h.Get(ChecksumSHA256.Key()),
ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
+ ChecksumMode: h.Get(ChecksumFullObjectMode.Key()),
}, nil
}
diff --git a/vendor/github.com/ncruces/go-sqlite3/driver/driver.go b/vendor/github.com/ncruces/go-sqlite3/driver/driver.go
index 9250cf39d..f473220c0 100644
--- a/vendor/github.com/ncruces/go-sqlite3/driver/driver.go
+++ b/vendor/github.com/ncruces/go-sqlite3/driver/driver.go
@@ -241,8 +241,9 @@ func (n *connector) Connect(ctx context.Context) (ret driver.Conn, err error) {
}
}()
- old := c.Conn.SetInterrupt(ctx)
- defer c.Conn.SetInterrupt(old)
+ if old := c.Conn.SetInterrupt(ctx); old != ctx {
+ defer c.Conn.SetInterrupt(old)
+ }
if !n.pragmas {
err = c.Conn.BusyTimeout(time.Minute)
@@ -362,8 +363,9 @@ func (c *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, e
c.txReset = `; PRAGMA query_only=` + string(c.readOnly)
}
- old := c.Conn.SetInterrupt(ctx)
- defer c.Conn.SetInterrupt(old)
+ if old := c.Conn.SetInterrupt(ctx); old != ctx {
+ defer c.Conn.SetInterrupt(old)
+ }
err := c.Conn.Exec(txBegin)
if err != nil {
@@ -382,8 +384,10 @@ func (c *conn) Commit() error {
func (c *conn) Rollback() error {
// ROLLBACK even if interrupted.
- old := c.Conn.SetInterrupt(context.Background())
- defer c.Conn.SetInterrupt(old)
+ ctx := context.Background()
+ if old := c.Conn.SetInterrupt(ctx); old != ctx {
+ defer c.Conn.SetInterrupt(old)
+ }
return c.Conn.Exec(`ROLLBACK` + c.txReset)
}
@@ -393,8 +397,9 @@ func (c *conn) Prepare(query string) (driver.Stmt, error) {
}
func (c *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
- old := c.Conn.SetInterrupt(ctx)
- defer c.Conn.SetInterrupt(old)
+ if old := c.Conn.SetInterrupt(ctx); old != ctx {
+ defer c.Conn.SetInterrupt(old)
+ }
s, tail, err := c.Conn.Prepare(query)
if err != nil {
@@ -419,8 +424,9 @@ func (c *conn) ExecContext(ctx context.Context, query string, args []driver.Name
return resultRowsAffected(0), nil
}
- old := c.Conn.SetInterrupt(ctx)
- defer c.Conn.SetInterrupt(old)
+ if old := c.Conn.SetInterrupt(ctx); old != ctx {
+ defer c.Conn.SetInterrupt(old)
+ }
err := c.Conn.Exec(query)
if err != nil {
@@ -483,8 +489,10 @@ func (s *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (drive
return nil, err
}
- old := s.Stmt.Conn().SetInterrupt(ctx)
- defer s.Stmt.Conn().SetInterrupt(old)
+ c := s.Stmt.Conn()
+ if old := c.SetInterrupt(ctx); old != ctx {
+ defer c.SetInterrupt(old)
+ }
err = errors.Join(
s.Stmt.Exec(),
@@ -493,7 +501,7 @@ func (s *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (drive
return nil, err
}
- return newResult(s.Stmt.Conn()), nil
+ return newResult(c), nil
}
func (s *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
@@ -678,13 +686,14 @@ func (r *rows) scanType(index int) scantype {
func (r *rows) loadColumnMetadata() {
if r.nulls == nil {
+ c := r.Stmt.Conn()
count := r.Stmt.ColumnCount()
nulls := make([]bool, count)
types := make([]string, count)
scans := make([]scantype, count)
for i := range nulls {
if col := r.Stmt.ColumnOriginName(i); col != "" {
- types[i], _, nulls[i], _, _, _ = r.Stmt.Conn().TableColumnMetadata(
+ types[i], _, nulls[i], _, _, _ = c.TableColumnMetadata(
r.Stmt.ColumnDatabaseName(i),
r.Stmt.ColumnTableName(i),
col)
@@ -762,8 +771,10 @@ func (r *rows) ColumnTypeScanType(index int) (typ reflect.Type) {
}
func (r *rows) Next(dest []driver.Value) error {
- old := r.Stmt.Conn().SetInterrupt(r.ctx)
- defer r.Stmt.Conn().SetInterrupt(old)
+ c := r.Stmt.Conn()
+ if old := c.SetInterrupt(r.ctx); old != r.ctx {
+ defer c.SetInterrupt(old)
+ }
if !r.Stmt.Step() {
if err := r.Stmt.Err(); err != nil {
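
The driver.go changes above only re-arm SetInterrupt (and its deferred restore) when the incoming context differs from the one already installed. That context is whatever database/sql hands down, as in this hedged sketch; the DSN and query are placeholders:

    package main

    import (
    	"context"
    	"database/sql"
    	"log"
    	"time"

    	_ "github.com/ncruces/go-sqlite3/driver" // registers the "sqlite3" driver
    	_ "github.com/ncruces/go-sqlite3/embed"  // embeds the sqlite3.wasm build
    )

    func main() {
    	db, err := sql.Open("sqlite3", "file:demo.db")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer db.Close()

    	// This ctx is what ends up in c.Conn.SetInterrupt(ctx) inside the driver;
    	// cancelling it interrupts the running statement.
    	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    	defer cancel()

    	var n int
    	if err := db.QueryRowContext(ctx, `SELECT count(*) FROM sqlite_schema`).Scan(&n); err != nil {
    		log.Fatal(err)
    	}
    	log.Println("schema objects:", n)
    }
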
diff --git a/vendor/github.com/ncruces/go-sqlite3/embed/build.sh b/vendor/github.com/ncruces/go-sqlite3/embed/build.sh
index a6b21d366..88e12be31 100644
--- a/vendor/github.com/ncruces/go-sqlite3/embed/build.sh
+++ b/vendor/github.com/ncruces/go-sqlite3/embed/build.sh
@@ -12,7 +12,7 @@ trap 'rm -f sqlite3.tmp' EXIT
"$WASI_SDK/clang" --target=wasm32-wasi -std=c23 -g0 -O2 \
-Wall -Wextra -Wno-unused-parameter -Wno-unused-function \
-o sqlite3.wasm "$ROOT/sqlite3/main.c" \
- -I"$ROOT/sqlite3" \
+ -I"$ROOT/sqlite3/libc" -I"$ROOT/sqlite3" \
-mexec-model=reactor \
-msimd128 -mmutable-globals -mmultivalue \
-mbulk-memory -mreference-types \
@@ -27,7 +27,7 @@ trap 'rm -f sqlite3.tmp' EXIT
"$BINARYEN/wasm-ctor-eval" -g -c _initialize sqlite3.wasm -o sqlite3.tmp
"$BINARYEN/wasm-opt" -g --strip --strip-producers -c -O3 \
- sqlite3.tmp -o sqlite3.wasm \
+ sqlite3.tmp -o sqlite3.wasm --low-memory-unused \
--enable-simd --enable-mutable-globals --enable-multivalue \
--enable-bulk-memory --enable-reference-types \
--enable-nontrapping-float-to-int --enable-sign-ext
\ No newline at end of file
diff --git a/vendor/github.com/ncruces/go-sqlite3/embed/exports.txt b/vendor/github.com/ncruces/go-sqlite3/embed/exports.txt
index 84d6102b8..1de4b3382 100644
--- a/vendor/github.com/ncruces/go-sqlite3/embed/exports.txt
+++ b/vendor/github.com/ncruces/go-sqlite3/embed/exports.txt
@@ -66,6 +66,7 @@ sqlite3_errmsg
sqlite3_error_offset
sqlite3_errstr
sqlite3_exec
+sqlite3_exec_go
sqlite3_expanded_sql
sqlite3_file_control
sqlite3_filename_database
diff --git a/vendor/github.com/ncruces/go-sqlite3/embed/sqlite3.wasm b/vendor/github.com/ncruces/go-sqlite3/embed/sqlite3.wasm
index 713fc0675..88ebe9932 100644
--- a/vendor/github.com/ncruces/go-sqlite3/embed/sqlite3.wasm
+++ b/vendor/github.com/ncruces/go-sqlite3/embed/sqlite3.wasm
Binary files differ
diff --git a/vendor/github.com/ncruces/go-sqlite3/error.go b/vendor/github.com/ncruces/go-sqlite3/error.go
index 59982eafd..83b057d0d 100644
--- a/vendor/github.com/ncruces/go-sqlite3/error.go
+++ b/vendor/github.com/ncruces/go-sqlite3/error.go
@@ -75,7 +75,7 @@ func (e *Error) As(err any) bool {
// Temporary returns true for [BUSY] errors.
func (e *Error) Temporary() bool {
- return e.Code() == BUSY
+ return e.Code() == BUSY || e.Code() == INTERRUPT
}
// Timeout returns true for [BUSY_TIMEOUT] errors.
diff --git a/vendor/github.com/ncruces/go-sqlite3/stmt.go b/vendor/github.com/ncruces/go-sqlite3/stmt.go
index 1ea726ea1..7fa2a504d 100644
--- a/vendor/github.com/ncruces/go-sqlite3/stmt.go
+++ b/vendor/github.com/ncruces/go-sqlite3/stmt.go
@@ -110,10 +110,7 @@ func (s *Stmt) Step() bool {
s.err = INTERRUPT
return false
}
- return s.step()
-}
-func (s *Stmt) step() bool {
rc := res_t(s.c.call("sqlite3_step", stk_t(s.handle)))
switch rc {
case _ROW:
@@ -141,10 +138,9 @@ func (s *Stmt) Exec() error {
if s.c.interrupt.Err() != nil {
return INTERRUPT
}
- // TODO: implement this in C.
- for s.step() {
- }
- return s.Reset()
+ rc := res_t(s.c.call("sqlite3_exec_go", stk_t(s.handle)))
+ s.err = nil
+ return s.c.error(rc)
}
// Status monitors the performance characteristics of prepared statements.
@@ -649,6 +645,7 @@ func (s *Stmt) ColumnValue(col int) Value {
// [FLOAT] as float64, [NULL] as nil,
// [TEXT] as string, and [BLOB] as []byte.
func (s *Stmt) Columns(dest ...any) error {
+ defer s.c.arena.mark()()
types, ptr, err := s.columns(int64(len(dest)))
if err != nil {
return err
@@ -701,6 +698,7 @@ func (s *Stmt) Columns(dest ...any) error {
// Any []byte are owned by SQLite and may be invalidated by
// subsequent calls to [Stmt] methods.
func (s *Stmt) ColumnsRaw(dest ...any) error {
+ defer s.c.arena.mark()()
types, ptr, err := s.columns(int64(len(dest)))
if err != nil {
return err
@@ -739,7 +737,6 @@ func (s *Stmt) ColumnsRaw(dest ...any) error {
}
func (s *Stmt) columns(count int64) ([]byte, ptr_t, error) {
- defer s.c.arena.mark()()
typePtr := s.c.arena.new(count)
dataPtr := s.c.arena.new(count * 8)
diff --git a/vendor/github.com/ncruces/go-sqlite3/util/sql3util/wasm/sql3parse_table.wasm b/vendor/github.com/ncruces/go-sqlite3/util/sql3util/wasm/sql3parse_table.wasm
index 4d3357ea1..fa8af14a0 100644
--- a/vendor/github.com/ncruces/go-sqlite3/util/sql3util/wasm/sql3parse_table.wasm
+++ b/vendor/github.com/ncruces/go-sqlite3/util/sql3util/wasm/sql3parse_table.wasm
Binary files differ
diff --git a/vendor/github.com/yuin/goldmark/ast/block.go b/vendor/github.com/yuin/goldmark/ast/block.go
index eae7acdca..d17564a99 100644
--- a/vendor/github.com/yuin/goldmark/ast/block.go
+++ b/vendor/github.com/yuin/goldmark/ast/block.go
@@ -515,6 +515,7 @@ func (n *HTMLBlock) Dump(source []byte, level int) {
cl := n.ClosureLine
fmt.Printf("%sClosure: \"%s\"\n", indent2, string(cl.Value(source)))
}
+ fmt.Printf("%sHasBlankPreviousLines: %v\n", indent2, n.HasBlankPreviousLines())
fmt.Printf("%s}\n", indent)
}
diff --git a/vendor/github.com/yuin/goldmark/parser/parser.go b/vendor/github.com/yuin/goldmark/parser/parser.go
index b05db1356..7ec40b31a 100644
--- a/vendor/github.com/yuin/goldmark/parser/parser.go
+++ b/vendor/github.com/yuin/goldmark/parser/parser.go
@@ -1096,7 +1096,7 @@ func (p *parser) parseBlocks(parent ast.Node, reader text.Reader, pc Context) {
// When current node is a container block and has no children,
// we try to open new child nodes
if state&HasChildren != 0 && i == lastIndex {
- isBlank = isBlankLine(lineNum-1, i, blankLines)
+ isBlank = isBlankLine(lineNum-1, i+1, blankLines)
p.openBlocks(be.Node, isBlank, reader, pc)
break
}