Diffstat (limited to 'vendor/github.com/minio/minio-go/v7/pkg')
 vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go  | 45 +++++++++++----
 vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go | 73 ++++++++++++++++
 2 files changed, 111 insertions(+), 7 deletions(-)
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
index 55636ad48..2f7993f4b 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
@@ -730,6 +730,8 @@ type Metrics struct {
 	Errors TimedErrStats `json:"failed,omitempty"`
 	// Total number of entries that are queued for replication
 	QStats InQueueMetric `json:"queued"`
+	// Total number of entries that have replication in progress
+	InProgress InProgressMetric `json:"inProgress"`
 	// Deprecated fields
 	// Total Pending size in bytes across targets
 	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
@@ -830,6 +832,9 @@ type InQueueMetric struct {
 	Max QStat `json:"peak" msg:"pq"`
 }
 
+// InProgressMetric holds stats for objects with replication in progress
+type InProgressMetric InQueueMetric
+
 // MetricName name of replication metric
 type MetricName string
 
@@ -849,6 +854,14 @@ type WorkerStat struct {
 	Max int32 `json:"max"`
 }
 
+// TgtHealth holds health status of a target
+type TgtHealth struct {
+	Online        bool          `json:"online"`
+	LastOnline    time.Time     `json:"lastOnline"`
+	TotalDowntime time.Duration `json:"totalDowntime"`
+	OfflineCount  int64         `json:"offlineCount"`
+}
+
 // ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes
 // and number of entries that failed replication after 3 retries
 type ReplMRFStats struct {
@@ -863,15 +876,18 @@ type ReplMRFStats struct {
 type ReplQNodeStats struct {
 	NodeName string `json:"nodeName"`
 	Uptime   int64  `json:"uptime"`
-	Workers  WorkerStat `json:"activeWorkers"`
+	Workers  WorkerStat `json:"workers"`
 
 	XferStats    map[MetricName]XferStats            `json:"transferSummary"`
 	TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"`
 
-	QStats   InQueueMetric  `json:"queueStats"`
-	MRFStats ReplMRFStats   `json:"mrfStats"`
-	Retries  CounterSummary `json:"retries"`
-	Errors   CounterSummary `json:"errors"`
+	QStats          InQueueMetric    `json:"queueStats"`
+	InProgressStats InProgressMetric `json:"progressStats"`
+
+	MRFStats  ReplMRFStats         `json:"mrfStats"`
+	Retries   CounterSummary       `json:"retries"`
+	Errors    CounterSummary       `json:"errors"`
+	TgtHealth map[string]TgtHealth `json:"tgtHealth,omitempty"`
 }
 
 // CounterSummary denotes the stats counter summary
@@ -918,6 +934,19 @@ func (q ReplQueueStats) qStatSummary() InQueueMetric {
 	return m
 }
 
+// inProgressSummary returns cluster level stats for objects with replication in progress
+func (q ReplQueueStats) inProgressSummary() InProgressMetric {
+	m := InProgressMetric{}
+	for _, v := range q.Nodes {
+		m.Avg.Add(v.InProgressStats.Avg)
+		m.Curr.Add(v.InProgressStats.Curr)
+		if m.Max.Count < v.InProgressStats.Max.Count {
+			m.Max.Add(v.InProgressStats.Max)
+		}
+	}
+	return m
+}
+
 // ReplQStats holds stats for objects in replication queue
 type ReplQStats struct {
 	Uptime int64 `json:"uptime"`
@@ -926,7 +955,9 @@ type ReplQStats struct {
 	XferStats    map[MetricName]XferStats            `json:"xferStats"`
 	TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`
 
-	QStats   InQueueMetric `json:"qStats"`
+	QStats          InQueueMetric    `json:"qStats"`
+	InProgressStats InProgressMetric `json:"progressStats"`
+
 	MRFStats ReplMRFStats   `json:"mrfStats"`
 	Retries  CounterSummary `json:"retries"`
 	Errors   CounterSummary `json:"errors"`
@@ -935,10 +966,10 @@ type ReplQStats struct {
 // QStats returns cluster level stats for objects in replication queue
 func (q ReplQueueStats) QStats() (r ReplQStats) {
 	r.QStats = q.qStatSummary()
+	r.InProgressStats = q.inProgressSummary()
 	r.XferStats = make(map[MetricName]XferStats)
 	r.TgtXferStats = make(map[string]map[MetricName]XferStats)
 	r.Workers = q.Workers()
-
 	for _, node := range q.Nodes {
 		for arn := range node.TgtXferStats {
 			xmap, ok := node.TgtXferStats[arn]
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go b/vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go
new file mode 100644
index 000000000..d6f674fac
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go
@@ -0,0 +1,73 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2025 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package utils
+
+import (
+	"bytes"
+	"errors"
+	"io"
+)
+
+// PeekReadCloser offers a way to peek a ReadCloser stream and then
+// return the exact stream of the underlying ReadCloser
+type PeekReadCloser struct {
+	io.ReadCloser
+
+	recordMode   bool
+	recordMaxBuf int
+	recordBuf    *bytes.Buffer
+}
+
+// ReplayFromStart ensures next Read() will restart to stream the
+// underlying ReadCloser stream from the beginning
+func (prc *PeekReadCloser) ReplayFromStart() {
+	prc.recordMode = false
+}
+
+func (prc *PeekReadCloser) Read(p []byte) (int, error) {
+	if prc.recordMode {
+		if prc.recordBuf.Len() > prc.recordMaxBuf {
+			return 0, errors.New("maximum peek buffer exceeded")
+		}
+		n, err := prc.ReadCloser.Read(p)
+		prc.recordBuf.Write(p[:n])
+		return n, err
+	}
+	// Replay mode
+	if prc.recordBuf.Len() > 0 {
+		pn, _ := prc.recordBuf.Read(p)
+		return pn, nil
+	}
+	return prc.ReadCloser.Read(p)
+}
+
+// Close releases the record buffer memory and closes the underlying ReadCloser
+func (prc *PeekReadCloser) Close() error {
+	prc.recordBuf.Reset()
+	return prc.ReadCloser.Close()
+}
+
+// NewPeekReadCloser returns a new peek reader
+func NewPeekReadCloser(rc io.ReadCloser, maxBufSize int) *PeekReadCloser {
+	return &PeekReadCloser{
+		ReadCloser:   rc,
+		recordMode:   true, // recording mode by default
+		recordBuf:    bytes.NewBuffer(make([]byte, 0, 1024)),
+		recordMaxBuf: maxBufSize,
+	}
+}
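
On the replication.go side, the new inProgressSummary fold produces a cluster-level metric by summing each node's Avg and Curr stats and accumulating a node's peak into Max whenever that node's peak Count exceeds the running one. Below is a minimal, self-contained sketch of that fold, not part of minio-go: QStat here only mirrors the Add method and Count field the diff relies on, and the Bytes field and the summarize helper are illustrative assumptions.

package main

import "fmt"

// QStat mirrors the shape the diff relies on: an Add method and a
// Count field. Bytes is an assumption for illustration only.
type QStat struct {
	Count float64
	Bytes float64
}

// Add accumulates another QStat into q.
func (q *QStat) Add(o QStat) {
	q.Count += o.Count
	q.Bytes += o.Bytes
}

// InProgressMetric mirrors the new type (upstream it is defined
// directly from InQueueMetric).
type InProgressMetric struct {
	Curr QStat
	Avg  QStat
	Max  QStat
}

// summarize is a hypothetical stand-in for inProgressSummary: it sums
// Avg and Curr across nodes and, like the upstream fold, adds a node's
// peak into Max whenever that node's peak Count exceeds the running one.
func summarize(nodes []InProgressMetric) InProgressMetric {
	m := InProgressMetric{}
	for _, v := range nodes {
		m.Avg.Add(v.Avg)
		m.Curr.Add(v.Curr)
		if m.Max.Count < v.Max.Count {
			m.Max.Add(v.Max)
		}
	}
	return m
}

func main() {
	nodes := []InProgressMetric{
		{Curr: QStat{Count: 5}, Max: QStat{Count: 9}},
		{Curr: QStat{Count: 3}, Max: QStat{Count: 12}},
	}
	s := summarize(nodes)
	// Curr sums to 8; Max accumulates 9 then 12 (the fold adds rather
	// than replaces), giving 21.
	fmt.Println(s.Curr.Count, s.Max.Count)
}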
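
The new PeekReadCloser lets a caller read ahead on a stream while recording up to maxBufSize bytes, then call ReplayFromStart so subsequent reads drain the recorded bytes before continuing with the rest of the underlying stream. A minimal usage sketch, assuming the helper is imported from its module path github.com/minio/minio-go/v7/pkg/utils; the in-memory body and the 512-byte peek are illustrative:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/minio/minio-go/v7/pkg/utils"
)

func main() {
	body := io.NopCloser(strings.NewReader("<!DOCTYPE html><p>hello</p>"))

	// Record at most 1 MiB while peeking; further peek reads error out.
	prc := utils.NewPeekReadCloser(body, 1<<20)
	defer prc.Close()

	// Peek: reads in record mode are copied into the internal buffer.
	peek := make([]byte, 512)
	n, err := prc.Read(peek)
	if err != nil && err != io.EOF {
		panic(err)
	}
	fmt.Println("sniffed:", http.DetectContentType(peek[:n]))

	// Replay: reads first drain the recorded bytes, then continue
	// with whatever remains of the underlying stream.
	prc.ReplayFromStart()
	rest, _ := io.ReadAll(prc)
	fmt.Println("full stream:", string(rest))
}

Note that Close resets the record buffer before closing the underlying ReadCloser, so a peeked-but-abandoned stream releases its buffered bytes.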
