author    Daenney <daenney@users.noreply.github.com>  2024-03-11 15:34:34 +0100
committer GitHub <noreply@github.com>  2024-03-11 15:34:34 +0100
commit    5e871e81a87a638b07d540c15d1b95608843255d (patch)
tree      62db65c7de651bac3d8894f4f70e0fe8de853a5e
parent    [chore]: Bump github.com/minio/minio-go/v7 from 7.0.67 to 7.0.69 (#2748) (diff)
download  gotosocial-5e871e81a87a638b07d540c15d1b95608843255d.tar.xz
[chore] Update usage of OTEL libraries (#2725)
* otel to 1.24
* prometheus exporter to 0.46
* bunotel to 1.1.17

Also:

* Use schemaless URL for metrics
* Add software version to tracing schema
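Both in-tree changes follow the same pattern (see internal/metrics/metrics.go and internal/tracing/tracing.go below): merge the SDK's default resource with a schemaless resource carrying the service identity. A minimal sketch of that pattern, assuming the OTel Go 1.24 API; the version string is a stand-in for config.GetSoftwareVersion():

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// NewSchemaless attaches no schema URL, so Merge cannot fail with a
	// schema-URL conflict when resource.Default() was built against a
	// different semconv version than the one this package imports.
	r, err := resource.Merge(
		resource.Default(),
		resource.NewSchemaless(
			semconv.ServiceName("GoToSocial"),
			semconv.ServiceVersion("0.14.0"), // stand-in for config.GetSoftwareVersion()
		),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(r)
}
```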
-rw-r--r--  go.mod | 32
-rw-r--r--  go.sum | 70
-rw-r--r--  internal/metrics/metrics.go | 9
-rw-r--r--  internal/tracing/tracing.go | 5
-rw-r--r--  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE (renamed from vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt) | 0
-rw-r--r--  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel | 2
-rw-r--r--  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel | 8
-rw-r--r--  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go | 4
-rw-r--r--  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go | 2
-rw-r--r--  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go | 26
-rw-r--r--  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go | 2
-rw-r--r--  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go | 32
-rw-r--r--  vendor/github.com/prometheus/client_model/go/metrics.pb.go | 195
-rw-r--r--  vendor/go.opentelemetry.io/otel/.codespellignore | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/.gitignore | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/CHANGELOG.md | 138
-rw-r--r--  vendor/go.opentelemetry.io/otel/CODEOWNERS | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/CONTRIBUTING.md | 27
-rw-r--r--  vendor/go.opentelemetry.io/otel/Makefile | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/README.md | 22
-rw-r--r--  vendor/go.opentelemetry.io/otel/RELEASING.md | 6
-rw-r--r--  vendor/go.opentelemetry.io/otel/attribute/set.go | 89
-rw-r--r--  vendor/go.opentelemetry.io/otel/baggage/baggage.go | 330
-rw-r--r--  vendor/go.opentelemetry.io/otel/doc.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go | 20
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go | 34
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go | 5
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go | 20
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go | 39
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go | 27
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go | 82
-rw-r--r--  vendor/go.opentelemetry.io/otel/internal/global/meter.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/internal/global/state.go | 6
-rw-r--r--  vendor/go.opentelemetry.io/otel/propagation/trace_context.go | 94
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/cache.go | 40
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/doc.go | 3
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go | 96
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go | 37
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go | 45
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go | 23
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go | 26
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go | 31
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go | 53
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go | 62
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go | 17
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go | 36
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go | 40
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go | 47
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go | 195
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go | 44
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go | 107
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md | 112
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go | 96
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/meter.go | 323
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go | 14
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go | 32
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/version.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/resource/auto.go | 25
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/resource/container.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/resource/env.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/resource/os.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/resource/process.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/resource/resource.go | 79
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/provider.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/span.go | 19
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/version.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go | 4398
-rw-r--r--  vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go | 20
-rw-r--r--  vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go | 211
-rw-r--r--  vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go | 20
-rw-r--r--  vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go | 2556
-rw-r--r--  vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go | 20
-rw-r--r--  vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go | 1334
-rw-r--r--  vendor/go.opentelemetry.io/otel/trace/tracestate.go | 197
-rw-r--r--  vendor/go.opentelemetry.io/otel/version.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/versions.yaml | 23
-rw-r--r--  vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go | 367
-rw-r--r--  vendor/google.golang.org/grpc/balancer_wrapper.go (renamed from vendor/google.golang.org/grpc/balancer_conn_wrappers.go) | 304
-rw-r--r--  vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go | 2
-rw-r--r--  vendor/google.golang.org/grpc/clientconn.go | 518
-rw-r--r--  vendor/google.golang.org/grpc/codes/codes.go | 8
-rw-r--r--  vendor/google.golang.org/grpc/credentials/tls.go | 75
-rw-r--r--  vendor/google.golang.org/grpc/dialoptions.go | 48
-rw-r--r--  vendor/google.golang.org/grpc/internal/buffer/unbounded.go | 41
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/funcs.go | 7
-rw-r--r--  vendor/google.golang.org/grpc/internal/envconfig/envconfig.go | 3
-rw-r--r--  vendor/google.golang.org/grpc/internal/envconfig/xds.go | 39
-rw-r--r--  vendor/google.golang.org/grpc/internal/experimental.go | 28
-rw-r--r--  vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go | 51
-rw-r--r--  vendor/google.golang.org/grpc/internal/idle/idle.go | 175
-rw-r--r--  vendor/google.golang.org/grpc/internal/internal.go | 29
-rw-r--r--  vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go | 69
-rw-r--r--  vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go | 70
-rw-r--r--  vendor/google.golang.org/grpc/internal/resolver/unix/unix.go | 4
-rw-r--r--  vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go | 29
-rw-r--r--  vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go | 54
-rw-r--r--  vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go | 54
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/controlbuf.go | 5
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/handler_server.go | 65
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http2_client.go | 30
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http2_server.go | 135
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/proxy.go | 14
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/transport.go | 22
-rw-r--r--  vendor/google.golang.org/grpc/metadata/metadata.go | 31
-rw-r--r--  vendor/google.golang.org/grpc/peer/peer.go | 2
-rw-r--r--  vendor/google.golang.org/grpc/picker_wrapper.go | 21
-rw-r--r--  vendor/google.golang.org/grpc/pickfirst.go | 14
-rw-r--r--  vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go | 36
-rw-r--r--  vendor/google.golang.org/grpc/resolver/map.go | 113
-rw-r--r--  vendor/google.golang.org/grpc/resolver/resolver.go | 20
-rw-r--r--  vendor/google.golang.org/grpc/resolver_conn_wrapper.go | 247
-rw-r--r--  vendor/google.golang.org/grpc/resolver_wrapper.go | 197
-rw-r--r--  vendor/google.golang.org/grpc/rpc_util.go | 8
-rw-r--r--  vendor/google.golang.org/grpc/server.go | 264
-rw-r--r--  vendor/google.golang.org/grpc/stream.go | 4
-rw-r--r--  vendor/google.golang.org/grpc/version.go | 2
-rw-r--r--  vendor/google.golang.org/grpc/vet.sh | 166
-rw-r--r--  vendor/modules.txt | 39
126 files changed, 12923 insertions, 2250 deletions
diff --git a/go.mod b/go.mod
index 1dbd65162..3d36ec4d5 100644
--- a/go.mod
+++ b/go.mod
@@ -57,17 +57,17 @@ require (
github.com/uptrace/bun v1.1.17
github.com/uptrace/bun/dialect/pgdialect v1.1.17
github.com/uptrace/bun/dialect/sqlitedialect v1.1.17
- github.com/uptrace/bun/extra/bunotel v1.1.16
+ github.com/uptrace/bun/extra/bunotel v1.1.17
github.com/wagslane/go-password-validator v0.3.0
github.com/yuin/goldmark v1.7.0
- go.opentelemetry.io/otel v1.20.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.20.0
- go.opentelemetry.io/otel/exporters/prometheus v0.43.0
- go.opentelemetry.io/otel/metric v1.20.0
- go.opentelemetry.io/otel/sdk v1.20.0
- go.opentelemetry.io/otel/sdk/metric v1.20.0
- go.opentelemetry.io/otel/trace v1.20.0
+ go.opentelemetry.io/otel v1.24.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0
+ go.opentelemetry.io/otel/exporters/prometheus v0.46.0
+ go.opentelemetry.io/otel/metric v1.24.0
+ go.opentelemetry.io/otel/sdk v1.24.0
+ go.opentelemetry.io/otel/sdk/metric v1.24.0
+ go.opentelemetry.io/otel/trace v1.24.0
go.uber.org/automaxprocs v1.5.3
golang.org/x/crypto v0.21.0
golang.org/x/image v0.15.0
@@ -142,7 +142,7 @@ require (
github.com/gorilla/handlers v1.5.1 // indirect
github.com/gorilla/securecookie v1.1.1 // indirect
github.com/gorilla/sessions v1.2.1 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/huandu/xstrings v1.3.3 // indirect
github.com/imdario/mergo v0.3.12 // indirect
@@ -176,7 +176,7 @@ require (
github.com/pelletier/go-toml/v2 v2.1.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_model v0.5.0 // indirect
+ github.com/prometheus/client_model v0.6.0 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
@@ -204,8 +204,8 @@ require (
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
go.mongodb.org/mongo-driver v1.11.3 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect
- go.opentelemetry.io/proto/otlp v1.0.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.1.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.7.0 // indirect
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
@@ -214,9 +214,9 @@ require (
golang.org/x/sys v0.18.0 // indirect
golang.org/x/tools v0.17.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect
- google.golang.org/grpc v1.59.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect
+ google.golang.org/grpc v1.61.1 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index f7678788d..cc04a1010 100644
--- a/go.sum
+++ b/go.sum
@@ -312,8 +312,6 @@ github.com/golang/geo v0.0.0-20200319012246-673a6f80352d/go.mod h1:QZ0nwyI2jOfgR
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo=
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
-github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -398,8 +396,8 @@ github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/z
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -551,8 +549,8 @@ github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
+github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
@@ -681,8 +679,8 @@ github.com/uptrace/bun/dialect/pgdialect v1.1.17 h1:NsvFVHAx1Az6ytlAD/B6ty3cVE6j
github.com/uptrace/bun/dialect/pgdialect v1.1.17/go.mod h1:fLBDclNc7nKsZLzNjFL6BqSdgJzbj2HdnyOnLoDvAME=
github.com/uptrace/bun/dialect/sqlitedialect v1.1.17 h1:i8NFU9r8YuavNFaYlNqi4ppn+MgoHtqLgpWQDrVTjm0=
github.com/uptrace/bun/dialect/sqlitedialect v1.1.17/go.mod h1:YF0FO4VVnY9GHNH6rM4r3STlVEBxkOc6L88Bm5X5mzA=
-github.com/uptrace/bun/extra/bunotel v1.1.16 h1:qkLTaTZK3FZk3b2P/stO/krS7KX9Fq5wSOj7Hlb2HG8=
-github.com/uptrace/bun/extra/bunotel v1.1.16/go.mod h1:JwEH0kdXFnzYuK8D6eXUrf9HKsYy5wmB+lqQ/+dvH4E=
+github.com/uptrace/bun/extra/bunotel v1.1.17 h1:RLEJdHH06RI9BLg06Vu1JHJ3KNHQCfwa2Fa3x+56qkk=
+github.com/uptrace/bun/extra/bunotel v1.1.17/go.mod h1:xV7AYrCFji4Sio6N9X+Cz+XJ+JuHq6TQQjuxaVbsypk=
github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.3 h1:LNi0Qa7869/loPjz2kmMvp/jwZZnMZ9scMJKhDJ1DIo=
github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.3/go.mod h1:jyigonKik3C5V895QNiAGpKYKEvFuqjw9qAEZks1mUg=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
@@ -736,26 +734,26 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
-go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.20.0 h1:CsBiKCiQPdSjS+MlRiqeTI9JDDpSuk0Hb6QTRfwer8k=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.20.0/go.mod h1:CMJYNAfooOwSZSAmAeMUV1M+TXld3BiK++z9fqIm2xk=
-go.opentelemetry.io/otel/exporters/prometheus v0.43.0 h1:Skkl6akzvdWweXX6LLAY29tyFSO6hWZ26uDbVGTDXe8=
-go.opentelemetry.io/otel/exporters/prometheus v0.43.0/go.mod h1:nZStMoc1H/YJpRjSx9IEX4abBMekORTLQcTUT1CgLkg=
-go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA=
-go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM=
-go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM=
-go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0=
-go.opentelemetry.io/otel/sdk/metric v1.20.0 h1:5eD40l/H2CqdKmbSV7iht2KMK0faAIL2pVYzJOWobGk=
-go.opentelemetry.io/otel/sdk/metric v1.20.0/go.mod h1:AGvpC+YF/jblITiafMTYgvRBUiwi9hZf0EYE2E5XlS8=
-go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
-go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU=
-go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
-go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 h1:Mw5xcxMwlqoJd97vwPxA8isEaIoxsta9/Q51+TTJLGE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0/go.mod h1:CQNu9bj7o7mC6U7+CA/schKEYakYXWr79ucDHTMGhCM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM=
+go.opentelemetry.io/otel/exporters/prometheus v0.46.0 h1:I8WIFXR351FoLJYuloU4EgXbtNX2URfU/85pUPheIEQ=
+go.opentelemetry.io/otel/exporters/prometheus v0.46.0/go.mod h1:ztwVUHe5DTR/1v7PeuGRnU5Bbd4QKYwApWmuutKsJSs=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8=
+go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
+go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -1063,12 +1061,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
-google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY=
-google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
-google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc=
+google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 h1:YJ5pD9rF8o9Qtta0Cmy9rdBwkSjrTCT6XTiUQVOtIos=
+google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM=
+google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1081,8 +1079,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
-google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
+google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY=
+google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
diff --git a/internal/metrics/metrics.go b/internal/metrics/metrics.go
index 269974623..e22a67dad 100644
--- a/internal/metrics/metrics.go
+++ b/internal/metrics/metrics.go
@@ -34,7 +34,7 @@ import (
"go.opentelemetry.io/otel/metric"
sdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/resource"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
const (
@@ -42,7 +42,6 @@ const (
)
func Initialize(db db.DB) error {
-
if !config.GetMetricsEnabled() {
return nil
}
@@ -55,8 +54,7 @@ func Initialize(db db.DB) error {
r, _ := resource.Merge(
resource.Default(),
- resource.NewWithAttributes(
- semconv.SchemaURL,
+ resource.NewSchemaless(
semconv.ServiceName(serviceName),
semconv.ServiceVersion(config.GetSoftwareVersion()),
),
@@ -90,7 +88,6 @@ func Initialize(db db.DB) error {
return nil
}),
)
-
if err != nil {
return err
}
@@ -107,7 +104,6 @@ func Initialize(db db.DB) error {
return nil
}),
)
-
if err != nil {
return err
}
@@ -124,7 +120,6 @@ func Initialize(db db.DB) error {
return nil
}),
)
-
if err != nil {
return err
}
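The hunks above only show the tails of the instrument registrations being tightened (dropping blank lines before `if err != nil`). For context, an OTel observable-gauge registration of the kind this file performs looks roughly like this; the meter name, instrument name, and value are illustrative, not GoToSocial's actual metrics:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("github.com/superseriousbusiness/gotosocial")

	// Each registration returns (instrument, error); the callback runs on
	// every metrics collection cycle.
	_, err := meter.Int64ObservableGauge(
		"example_total_users", // illustrative instrument name
		metric.WithDescription("Total number of users"),
		metric.WithInt64Callback(func(ctx context.Context, o metric.Int64Observer) error {
			o.Observe(42) // a real implementation would query the DB here
			return nil
		}),
	)
	if err != nil {
		panic(err)
	}
}
```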
diff --git a/internal/tracing/tracing.go b/internal/tracing/tracing.go
index 4f37ccd09..dd1e19be9 100644
--- a/internal/tracing/tracing.go
+++ b/internal/tracing/tracing.go
@@ -35,7 +35,7 @@ import (
"go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/semconv/v1.20.0/httpconv"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
oteltrace "go.opentelemetry.io/otel/trace"
"github.com/superseriousbusiness/gotosocial/internal/config"
@@ -88,6 +88,7 @@ func Initialize() error {
resource.Default(),
resource.NewSchemaless(
semconv.ServiceName("GoToSocial"),
+ semconv.ServiceVersion(config.GetSoftwareVersion()),
),
)
if err != nil {
@@ -164,7 +165,7 @@ func InstrumentGin() gin.HandlerFunc {
status := c.Writer.Status()
span.SetStatus(httpconv.ServerStatus(status))
if status > 0 {
- span.SetAttributes(semconv.HTTPStatusCode(status))
+ span.SetAttributes(semconv.HTTPResponseStatusCode(status))
}
if len(c.Errors) > 0 {
span.SetAttributes(attribute.String("gin.errors", c.Errors.String()))
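Besides adding the service version to the resource, the semconv bump forces one attribute rename: v1.24.0 moved HTTP attributes into split `http.request.*`/`http.response.*` namespaces, so `semconv.HTTPStatusCode` (key `http.status_code` in v1.21.0) becomes `semconv.HTTPResponseStatusCode`. A quick check of what the new helper emits:

```go
package main

import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	kv := semconv.HTTPResponseStatusCode(200)
	// Prints: http.response.status_code 200
	fmt.Println(kv.Key, kv.Value.AsInt64())
}
```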
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
index 364516251..364516251 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
index f694f3c0d..b8fbb2b77 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
@@ -24,7 +24,7 @@ go_test(
embed = [":httprule"],
deps = [
"//utilities",
- "@com_github_golang_glog//:glog",
+ "@org_golang_google_grpc//grpclog",
],
)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
index a8789f170..78d7c9f5c 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
@@ -26,7 +26,7 @@ go_library(
deps = [
"//internal/httprule",
"//utilities",
- "@go_googleapis//google/api:httpbody_go_proto",
+ "@org_golang_google_genproto_googleapis_api//httpbody",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//grpclog",
"@org_golang_google_grpc//health/grpc_health_v1",
@@ -70,9 +70,9 @@ go_test(
"//utilities",
"@com_github_google_go_cmp//cmp",
"@com_github_google_go_cmp//cmp/cmpopts",
- "@go_googleapis//google/api:httpbody_go_proto",
- "@go_googleapis//google/rpc:errdetails_go_proto",
- "@go_googleapis//google/rpc:status_go_proto",
+ "@org_golang_google_genproto_googleapis_api//httpbody",
+ "@org_golang_google_genproto_googleapis_rpc//errdetails",
+ "@org_golang_google_genproto_googleapis_rpc//status",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//health/grpc_health_v1",
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
index d2bcbb7d2..230cac7b8 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -137,7 +137,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
doForwardTrailers := requestAcceptsTrailers(r)
if doForwardTrailers {
- handleForwardResponseTrailerHeader(w, md)
+ handleForwardResponseTrailerHeader(w, mux, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
@@ -152,7 +152,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
}
if doForwardTrailers {
- handleForwardResponseTrailer(w, md)
+ handleForwardResponseTrailer(w, mux, md)
}
}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
index a03dd166b..19d9d37ff 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -27,7 +27,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
var root interface{}
if err := json.NewDecoder(r).Decode(&root); err != nil {
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
return fm, nil
}
return nil, err
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
index 945f3a5eb..5e14cf8b0 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
@@ -2,7 +2,7 @@ package runtime
import (
"context"
- "fmt"
+ "errors"
"io"
"net/http"
"net/textproto"
@@ -48,7 +48,7 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
var wroteHeader bool
for {
resp, err := recv()
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
return
}
if err != nil {
@@ -108,18 +108,20 @@ func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, m
}
}
-func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
+func handleForwardResponseTrailerHeader(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
for k := range md.TrailerMD {
- tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
- w.Header().Add("Trailer", tKey)
+ if h, ok := mux.outgoingTrailerMatcher(k); ok {
+ w.Header().Add("Trailer", textproto.CanonicalMIMEHeaderKey(h))
+ }
}
}
-func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
+func handleForwardResponseTrailer(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
for k, vs := range md.TrailerMD {
- tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
- for _, v := range vs {
- w.Header().Add(tKey, v)
+ if h, ok := mux.outgoingTrailerMatcher(k); ok {
+ for _, v := range vs {
+ w.Header().Add(h, v)
+ }
}
}
}
@@ -147,12 +149,10 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
doForwardTrailers := requestAcceptsTrailers(req)
if doForwardTrailers {
- handleForwardResponseTrailerHeader(w, md)
+ handleForwardResponseTrailerHeader(w, mux, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
- handleForwardResponseTrailerHeader(w, md)
-
contentType := marshaler.ContentType(resp)
w.Header().Set("Content-Type", contentType)
@@ -178,7 +178,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
}
if doForwardTrailers {
- handleForwardResponseTrailer(w, md)
+ handleForwardResponseTrailer(w, mux, md)
}
}
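Several hunks in this vendored package swap `err == io.EOF` for `errors.Is(err, io.EOF)`, which also matches an EOF that arrives wrapped in another error. A self-contained illustration:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	wrapped := fmt.Errorf("recv failed: %w", io.EOF)
	fmt.Println(wrapped == io.EOF)          // false: plain equality misses wrapped errors
	fmt.Println(errors.Is(wrapped, io.EOF)) // true: errors.Is unwraps the chain
}
```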
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
index b86135c88..6de2e220c 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
@@ -26,7 +26,7 @@ func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
if httpBody, ok := v.(*httpbody.HttpBody); ok {
- return httpBody.Data, nil
+ return httpBody.GetData(), nil
}
return h.Marshaler.Marshal(v)
}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
index f451cb441..628e1fde1 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -57,6 +57,7 @@ type ServeMux struct {
marshalers marshalerRegistry
incomingHeaderMatcher HeaderMatcherFunc
outgoingHeaderMatcher HeaderMatcherFunc
+ outgoingTrailerMatcher HeaderMatcherFunc
metadataAnnotators []func(context.Context, *http.Request) metadata.MD
errorHandler ErrorHandlerFunc
streamErrorHandler StreamErrorHandlerFunc
@@ -114,10 +115,18 @@ func DefaultHeaderMatcher(key string) (string, bool) {
return "", false
}
+func defaultOutgoingHeaderMatcher(key string) (string, bool) {
+ return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
+}
+
+func defaultOutgoingTrailerMatcher(key string) (string, bool) {
+ return fmt.Sprintf("%s%s", MetadataTrailerPrefix, key), true
+}
+
// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
//
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
-// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
+// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return the modified header.
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
for _, header := range fn.matchedMalformedHeaders() {
grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
@@ -147,13 +156,24 @@ func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
//
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
// passed to http response returned from gateway. To transform the header before passing to response,
-// matcher should return modified header.
+// matcher should return the modified header.
func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
return func(mux *ServeMux) {
mux.outgoingHeaderMatcher = fn
}
}
+// WithOutgoingTrailerMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response trailer metadata. If matcher returns true, that header will be
+// passed to http response returned from gateway. To transform the header before passing to response,
+// matcher should return the modified header.
+func WithOutgoingTrailerMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.outgoingTrailerMatcher = fn
+ }
+}
+
// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
//
// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
@@ -273,11 +293,11 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux {
if serveMux.incomingHeaderMatcher == nil {
serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
}
-
if serveMux.outgoingHeaderMatcher == nil {
- serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
- return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
- }
+ serveMux.outgoingHeaderMatcher = defaultOutgoingHeaderMatcher
+ }
+ if serveMux.outgoingTrailerMatcher == nil {
+ serveMux.outgoingTrailerMatcher = defaultOutgoingTrailerMatcher
}
return serveMux
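The new `WithOutgoingTrailerMatcher` option mirrors `WithOutgoingHeaderMatcher` for trailers: by default every gRPC trailer is still forwarded with the `Grpc-Trailer-` prefix, but a gateway can now rename or drop trailers instead. A hypothetical usage sketch (the filtering policy here is made up for illustration):

```go
package main

import (
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// Forward only trailers starting with "x-", without the default
	// Grpc-Trailer- prefix; drop everything else.
	mux := runtime.NewServeMux(
		runtime.WithOutgoingTrailerMatcher(func(key string) (string, bool) {
			if strings.HasPrefix(strings.ToLower(key), "x-") {
				return key, true
			}
			return "", false
		}),
	)
	_ = mux // register handlers on mux as usual
}
```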
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index cee360db7..2f1549075 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -483,6 +483,8 @@ type Histogram struct {
// histograms.
PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
+ // Only used for native histograms. These exemplars MUST have a timestamp.
+ Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
}
func (x *Histogram) Reset() {
@@ -622,6 +624,13 @@ func (x *Histogram) GetPositiveCount() []float64 {
return nil
}
+func (x *Histogram) GetExemplars() []*Exemplar {
+ if x != nil {
+ return x.Exemplars
+ }
+ return nil
+}
+
// A Bucket of a conventional histogram, each of which is treated as
// an individual counter-like time series by Prometheus.
type Bucket struct {
@@ -923,6 +932,7 @@ type MetricFamily struct {
Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
}
func (x *MetricFamily) Reset() {
@@ -985,6 +995,13 @@ func (x *MetricFamily) GetMetric() []*Metric {
return nil
}
+func (x *MetricFamily) GetUnit() string {
+ if x != nil && x.Unit != nil {
+ return *x.Unit
+ }
+ return ""
+}
+
var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
@@ -1028,7 +1045,7 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48,
+ 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
@@ -1071,79 +1088,84 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
- 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69,
- 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f,
- 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
- 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52,
- 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
- 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62,
- 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65,
- 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
- 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
- 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
- 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
- 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e,
- 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11,
- 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
- 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
- 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a,
+ 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65,
+ 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75,
+ 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a,
+ 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63,
+ 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
+ 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75,
+ 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42,
+ 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+ 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16,
+ 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06,
+ 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91,
+ 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62,
+ 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a,
0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
- 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12,
- 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f,
- 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63,
- 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52,
- 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
- 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75,
- 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65,
+ 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+ 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72,
+ 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+ 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
+ 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
- 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
- 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61,
- 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75,
- 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69,
- 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74,
- 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
- 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
- 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48,
- 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67,
- 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69,
- 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68,
- 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12,
- 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e,
- 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c,
- 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52,
- 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18,
- 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
- 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74,
- 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d,
- 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55,
- 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10,
- 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b,
- 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48,
- 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41,
- 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42,
- 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
- 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63,
- 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69,
- 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69,
- 0x65, 0x6e, 0x74,
+ 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70,
+ 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73,
+ 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+ 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d,
+ 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
+ 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
+ 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
+ 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a,
+ 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
+ 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47,
+ 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02,
+ 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a,
+ 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f,
+ 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10,
+ 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f,
+ 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74,
}
var (
@@ -1185,22 +1207,23 @@ var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
- 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
- 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
- 13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
- 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
- 2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
- 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
- 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
- 6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
- 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
- 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
- 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
- 19, // [19:19] is the sub-list for method output_type
- 19, // [19:19] is the sub-list for method input_type
- 19, // [19:19] is the sub-list for extension type_name
- 19, // [19:19] is the sub-list for extension extendee
- 0, // [0:19] is the sub-list for field type_name
+ 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
+ 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
+ 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
+ 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+ 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
+ 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
+ 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
+ 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
+ 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
+ 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
+ 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
+ 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
+ 20, // [20:20] is the sub-list for method output_type
+ 20, // [20:20] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
}
func init() { file_io_prometheus_client_metrics_proto_init() }
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
index ae6a3bcf1..120b63a9c 100644
--- a/vendor/go.opentelemetry.io/otel/.codespellignore
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -3,3 +3,5 @@ fo
te
collison
consequentially
+ans
+nam
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
index 924805565..895c7664b 100644
--- a/vendor/go.opentelemetry.io/otel/.gitignore
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -14,13 +14,9 @@ go.work.sum
gen/
/example/dice/dice
-/example/fib/fib
-/example/fib/traces.txt
-/example/jaeger/jaeger
/example/namedtracer/namedtracer
/example/otel-collector/otel-collector
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
-/example/view/view
/example/zipkin/zipkin
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index c4e7ad475..98f2d2043 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,134 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
+## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23
+
+This release is the last to support [Go 1.20].
+The next release will require at least [Go 1.21].
+
+### Added
+
+- Support [Go 1.22]. (#4890)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900)
+- The `go.opentelemetry.io/otel/log` module is added.
+ This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
+ This module is in an alpha state; it is subject to breaking changes.
+ See our [versioning policy](./VERSIONING.md) for more info. (#4961)
+
+### Fixed
+
+- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945)
+- Fix negative buckets in output of exponential histograms. (#4956)
+
+## [1.23.1] 2024-02-07
+
+### Fixed
+
+- Register all callbacks passed during observable instrument creation, instead of registering only the last one multiple times, in `go.opentelemetry.io/otel/sdk/metric`. (#4888)
+
+## [1.23.0] 2024-02-06
+
+This release contains the first stable, `v1`, release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808)
+- Experimental exemplar exporting is added to the metric SDK.
+ See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871)
+- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
+ This error is returned when a merge of two `Resource`s with different (non-empty) schema URLs is attempted. (#4876)
+
+### Changed
+
+- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict.
+ Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource` is returned along with the new `ErrSchemaURLConflict` error.
+ It is up to the user to decide if they want to use the returned `Resource` or not.
+ It may have desired attributes overwritten or include stale semantic conventions. (#4876)
+
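
As a sketch of how a caller might consume this new behavior (the second schema URL and the attribute values below are arbitrary examples):

```go
package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	a := resource.NewWithAttributes(semconv.SchemaURL, semconv.ServiceName("svc"))
	b := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.21.0", semconv.ServiceVersion("1.0.0"))

	merged, err := resource.Merge(a, b)
	if errors.Is(err, resource.ErrSchemaURLConflict) {
		// The partial result is still usable; the caller decides whether
		// possibly stale semantic conventions are acceptable.
		fmt.Println("schema URL conflict, using partial merge:", merged)
	}
}
```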
+### Fixed
+
+- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449)
+- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
+- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4827)
+
+## [1.23.0-rc.1] 2024-01-18
+
+This is a release candidate for the v1.23.0 release.
+That release is expected to include the `v1` release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+## [1.22.0/0.45.0] 2024-01-17
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
+ The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
+- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
+ The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
+- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
+ The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
+- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
+ The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
+- Experimental cardinality limiting is added to the metric SDK.
+ See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
+- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
+
+### Changed
+
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
+- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754)
+- Record synchronous measurements when the passed context is canceled instead of dropping them in `go.opentelemetry.io/otel/sdk/metric`.
+ If you do not want to make a measurement when the context is canceled, you need to handle it yourself (e.g. `if ctx.Err() != nil`). (#4671)
+- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
+- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
+- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
+- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
+- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
+- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
+- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
+
+### Fixed
+
+- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755)
+- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
+- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
+- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
+- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
+- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
+
+## [1.21.0/0.44.0] 2023-11-16
+
+### Removed
+
+- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
+- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
+- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
+- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
+
+### Fixed
+
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
+
## [1.20.0/0.43.0] 2023-11-10
This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
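
A sketch of what such a custom implementation looks like after this change (the `tracer` type here is hypothetical, used only for illustration):

```go
package sketch

import (
	"context"

	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
)

// tracer is a hypothetical custom trace API implementation. Embedding
// embedded.Tracer is what the release note above requires; it opts in to
// compile-time failures when the API grows new methods.
type tracer struct {
	embedded.Tracer
}

var _ trace.Tracer = tracer{}

func (t tracer) Start(ctx context.Context, name string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
	// A real implementation would create and return its own span; the
	// span already in ctx (or a no-op span) stands in for brevity.
	return ctx, trace.SpanFromContext(ctx)
}
```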
@@ -2721,7 +2849,13 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.20.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.24.0...HEAD
+[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0
+[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1
+[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
+[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1
+[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
+[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
@@ -2794,6 +2928,8 @@ It contains api and sdk for trace and meter.
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
+[Go 1.22]: https://go.dev/doc/go1.22
+[Go 1.21]: https://go.dev/doc/go1.21
[Go 1.20]: https://go.dev/doc/go1.20
[Go 1.19]: https://go.dev/doc/go1.19
[Go 1.18]: https://go.dev/doc/go1.18
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index 623740007..31d336d92 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -14,4 +14,4 @@
* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
-CODEOWNERS @MrAlias @MadVikingGod @pellared \ No newline at end of file
+CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole \ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index a00dbca7b..c9f2bac55 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -90,6 +90,10 @@ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
request ID to the entry you added to `CHANGELOG.md`.
+Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
+Rewriting Git history makes it difficult to keep track of iterations during code review.
+All pull requests are squashed to a single commit upon merge to `main`.
+
### How to Receive Comments
* If the PR is not ready for review, please put `[WIP]` in the title,
@@ -587,25 +591,46 @@ this.
[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
+### Ignoring context cancellation
+
+OpenTelemetry API implementations need to ignore the cancellation of the context that is
+passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
+Recording methods should not return an error describing the cancellation state of the context
+when they complete, nor should they abort any work.
+
+This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
+the method. In that case the context cancellation can be used for the timeout with the
+restriction that this behavior is documented for the method. Otherwise, timeouts
+are expected to be handled by the user calling the API, not the implementation.
+
+Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method
+of a provider. It is assumed the context passed from a user is not used for this purpose.
+
+Outside of the direct recording of telemetry from the API (e.g. exporting telemetry,
+force flushing telemetry, shutting down a signal provider) the context cancellation
+should be honored. This means all work done on behalf of the user-provided context
+should be canceled.
+
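
A non-normative sketch of this rule (the `recorder` type and its method below are hypothetical, not part of any OpenTelemetry API):

```go
package sketch

import (
	"context"

	"go.opentelemetry.io/otel/baggage"
)

type measurement struct {
	value float64
	bag   baggage.Baggage
}

// recorder is a hypothetical API implementation used for illustration.
type recorder struct {
	measurements []measurement
}

// RecordValue must not inspect ctx.Err(), return a cancellation error, or
// abort its work when ctx is canceled. The context is used only as a
// carrier, here for the baggage associated with the measurement.
func (r *recorder) RecordValue(ctx context.Context, v float64) {
	r.measurements = append(r.measurements, measurement{
		value: v,
		bag:   baggage.FromContext(ctx),
	})
}
```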
## Approvers and Maintainers
### Approvers
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
-- [David Ashpole](https://github.com/dashpole), Google
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Damien Mathieu](https://github.com/dmathieu), Elastic
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
### Maintainers
+- [David Ashpole](https://github.com/dashpole), Google
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Robert Pająk](https://github.com/pellared), Splunk
- [Tyler Yahn](https://github.com/MrAlias), Splunk
### Emeritus
+- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Josh MacDonald](https://github.com/jmacd), LightStep
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index 35fc18996..6de95219b 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -192,7 +192,7 @@ test-coverage: | $(GOCOVMERGE)
done; \
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
-# Adding a directory will include all benchmarks in that direcotry if a filter is not specified.
+# Adding a directory will include all benchmarks in that directory if a filter is not specified.
BENCHMARK_TARGETS := sdk/trace
.PHONY: benchmark
benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
@@ -315,4 +315,4 @@ add-tags: | $(MULTIMOD)
.PHONY: lint-markdown
lint-markdown:
- docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
+ docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 2c5b0cc28..7766259a5 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -11,14 +11,11 @@ It provides a set of APIs to directly measure performance and behavior of your s
## Project Status
-| Signal | Status |
-|---------|------------|
-| Traces | Stable |
-| Metrics | Stable |
-| Logs | Design [1] |
-
-- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)).
- No Logs Pull Requests are currently being accepted.
+| Signal | Status |
+|---------|--------------------|
+| Traces | Stable |
+| Metrics | Stable |
+| Logs | In development[^1] |
Progress and status specific to this repository is tracked in our
[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
@@ -28,6 +25,8 @@ and
Project versioning information and stability guarantees can be found in the
[versioning documentation](VERSIONING.md).
+[^1]: https://github.com/orgs/open-telemetry/projects/43
+
### Compatibility
OpenTelemetry-Go ensures compatibility with the current supported versions of
@@ -50,14 +49,19 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture |
|---------|------------|--------------|
+| Ubuntu | 1.22 | amd64 |
| Ubuntu | 1.21 | amd64 |
| Ubuntu | 1.20 | amd64 |
+| Ubuntu | 1.22 | 386 |
| Ubuntu | 1.21 | 386 |
| Ubuntu | 1.20 | 386 |
+| MacOS | 1.22 | amd64 |
| MacOS | 1.21 | amd64 |
| MacOS | 1.20 | amd64 |
+| Windows | 1.22 | amd64 |
| Windows | 1.21 | amd64 |
| Windows | 1.20 | amd64 |
+| Windows | 1.22 | 386 |
| Windows | 1.21 | 386 |
| Windows | 1.20 | 386 |
@@ -66,7 +70,7 @@ are made for those systems currently.
## Getting Started
-You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/).
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
OpenTelemetry's goal is to provide a single set of APIs to capture distributed
traces and metrics from your application and send them to an observability
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 82ce3ee46..d2691d0bd 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -123,12 +123,12 @@ Once verified be sure to [make a release for the `contrib` repository](https://g
### Website Documentation
-Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go].
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
-[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/
-[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go
+[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
+[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
### Demo Repository
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index 9f9303d4f..fb6da5145 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -279,52 +279,75 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S
position--
kvs[offset], kvs[position] = kvs[position], kvs[offset]
}
+ kvs = kvs[position:]
+
if filter != nil {
- return filterSet(kvs[position:], filter)
+ if div := filteredToFront(kvs, filter); div != 0 {
+ return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
+ }
}
- return Set{
- equivalent: computeDistinct(kvs[position:]),
- }, nil
+ return Set{equivalent: computeDistinct(kvs)}, nil
}
-// filterSet reorders kvs so that included keys are contiguous at the end of
-// the slice, while excluded keys precede the included keys.
-func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
- var excluded []KeyValue
-
- // Move attributes that do not match the filter so they're adjacent before
- // calling computeDistinct().
- distinctPosition := len(kvs)
-
- // Swap indistinct keys forward and distinct keys toward the
- // end of the slice.
- offset := len(kvs) - 1
- for ; offset >= 0; offset-- {
- if filter(kvs[offset]) {
- distinctPosition--
- kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
- continue
+// filteredToFront filters slice in-place using keep function. All KeyValues that need to
+// be removed are moved to the front. All KeyValues that need to be kept are
+// moved (in-order) to the back. The index for the first KeyValue to be kept is
+// returned.
+func filteredToFront(slice []KeyValue, keep Filter) int {
+ n := len(slice)
+ j := n
+ for i := n - 1; i >= 0; i-- {
+ if keep(slice[i]) {
+ j--
+ slice[i], slice[j] = slice[j], slice[i]
}
}
- excluded = kvs[:distinctPosition]
-
- return Set{
- equivalent: computeDistinct(kvs[distinctPosition:]),
- }, excluded
+ return j
}
// Filter returns a filtered copy of this Set. See the documentation for
// NewSetWithSortableFiltered for more details.
func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if re == nil {
- return Set{
- equivalent: l.equivalent,
- }, nil
+ return *l, nil
}
- // Note: This could be refactored to avoid the temporary slice
- // allocation, if it proves to be expensive.
- return filterSet(l.ToSlice(), re)
+ // Iterate in reverse to the first attribute that will be filtered out.
+ n := l.Len()
+ first := n - 1
+ for ; first >= 0; first-- {
+ kv, _ := l.Get(first)
+ if !re(kv) {
+ break
+ }
+ }
+
+ // No attributes will be dropped, return the immutable Set l and nil.
+ if first < 0 {
+ return *l, nil
+ }
+
+ // Copy now that we know we need to return a modified set.
+ //
+ // Do not do this in-place on the underlying storage of *Set l. Sets are
+ // immutable and filtering should not change this.
+ slice := l.ToSlice()
+
+ // Don't re-iterate the slice if only slice[0] is filtered.
+ if first == 0 {
+ // It is safe to assume len(slice) >= 1 given we found at least one
+ // attribute above that needs to be filtered out.
+ return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
+ }
+
+ // Move the filtered slice[first] to the front (preserving order).
+ kv := slice[first]
+ copy(slice[1:first+1], slice[:first])
+ slice[0] = kv
+
+ // Do not re-evaluate re(slice[first+1:]).
+ div := filteredToFront(slice[1:first+1], re) + 1
+ return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
}
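
A short usage sketch of the rewritten `Filter` with a hand-written filter (the attribute names are arbitrary); note the fast path above returns the original immutable `Set` when nothing is filtered out:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("service.name", "api"),
		attribute.Int("http.status_code", 200),
	)

	// Keep only service.name; dropped attributes are returned to the caller.
	kept, dropped := set.Filter(func(kv attribute.KeyValue) bool {
		return kv.Key == "service.name"
	})
	fmt.Println(kept.Len(), len(dropped)) // 1 1
}
```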
// computeDistinct returns a Distinct using either the fixed- or
@@ -404,7 +427,7 @@ func (l *Set) MarshalJSON() ([]byte, error) {
return json.Marshal(l.equivalent.iface)
}
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+// MarshalLog is the marshaling function used by the logging system to represent this Set.
func (l Set) MarshalLog() interface{} {
kvs := make(map[string]string)
for _, kv := range l.ToSlice() {
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index 84532cb1d..7d27cf77d 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -18,7 +18,6 @@ import (
"errors"
"fmt"
"net/url"
- "regexp"
"strings"
"go.opentelemetry.io/otel/internal/baggage"
@@ -32,16 +31,6 @@ const (
listDelimiter = ","
keyValueDelimiter = "="
propertyDelimiter = ";"
-
- keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
- valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
- keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
-)
-
-var (
- keyRe = regexp.MustCompile(`^` + keyDef + `$`)
- valueRe = regexp.MustCompile(`^` + valueDef + `$`)
- propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
)
var (
@@ -67,7 +56,7 @@ type Property struct {
//
// If key is invalid, an error will be returned.
func NewKeyProperty(key string) (Property, error) {
- if !keyRe.MatchString(key) {
+ if !validateKey(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
@@ -77,14 +66,29 @@ func NewKeyProperty(key string) (Property, error) {
// NewKeyValueProperty returns a new Property for key with value.
//
-// If key or value are invalid, an error will be returned.
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewKeyValuePropertyRaw] instead,
+// which does not require percent-encoding of the value.
func NewKeyValueProperty(key, value string) (Property, error) {
- if !keyRe.MatchString(key) {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ if !validateValue(value) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
- if !valueRe.MatchString(value) {
+ decodedValue, err := url.PathUnescape(value)
+ if err != nil {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
+ return NewKeyValuePropertyRaw(key, decodedValue)
+}
+
+// NewKeyValuePropertyRaw returns a new Property for key with value.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+func NewKeyValuePropertyRaw(key, value string) (Property, error) {
+ if !validateKey(key) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
p := Property{
key: key,
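
The practical difference between the two property constructors, as a sketch (the key and value are arbitrary examples):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewKeyValueProperty expects a percent-encoded value and decodes it.
	p1, _ := baggage.NewKeyValueProperty("style", "dark%20mode")
	fmt.Println(p1.Value()) // dark mode true

	// NewKeyValuePropertyRaw accepts the raw value directly.
	p2, _ := baggage.NewKeyValuePropertyRaw("style", "dark mode")
	fmt.Println(p2.Value()) // dark mode true
}
```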
@@ -106,20 +110,11 @@ func parseProperty(property string) (Property, error) {
return newInvalidProperty(), nil
}
- match := propertyRe.FindStringSubmatch(property)
- if len(match) != 4 {
+ p, ok := parsePropertyInternal(property)
+ if !ok {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
}
- var p Property
- if match[1] != "" {
- p.key = match[1]
- } else {
- p.key = match[2]
- p.value = match[3]
- p.hasValue = true
- }
-
return p, nil
}
@@ -130,12 +125,9 @@ func (p Property) validate() error {
return fmt.Errorf("invalid property: %w", err)
}
- if !keyRe.MatchString(p.key) {
+ if !validateKey(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
}
- if p.hasValue && !valueRe.MatchString(p.value) {
- return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
- }
if !p.hasValue && p.value != "" {
return errFunc(errors.New("inconsistent value"))
}
@@ -154,11 +146,11 @@ func (p Property) Value() (string, bool) {
return p.value, p.hasValue
}
-// String encodes Property into a string compliant with the W3C Baggage
+// String encodes Property into a header string compliant with the W3C Baggage
// specification.
func (p Property) String() string {
if p.hasValue {
- return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
+ return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
}
return p.key
}
@@ -218,7 +210,7 @@ func (p properties) validate() error {
return nil
}
-// String encodes properties into a string compliant with the W3C Baggage
+// String encodes properties into a header string compliant with the W3C Baggage
// specification.
func (p properties) String() string {
props := make([]string, len(p))
@@ -240,11 +232,28 @@ type Member struct {
hasData bool
}
-// NewMember returns a new Member from the passed arguments. The key will be
-// used directly while the value will be url decoded after validation. An error
-// is returned if the created Member would be invalid according to the W3C
-// Baggage specification.
+// NewMember returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewMemberRaw] instead,
+// which does not require percent-encoding of the value.
func NewMember(key, value string, props ...Property) (Member, error) {
+ if !validateValue(value) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ decodedValue, err := url.PathUnescape(value)
+ if err != nil {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ return NewMemberRaw(key, decodedValue, props...)
+}
+
+// NewMemberRaw returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+func NewMemberRaw(key, value string, props ...Property) (Member, error) {
m := Member{
key: key,
value: value,
@@ -254,11 +263,6 @@ func NewMember(key, value string, props ...Property) (Member, error) {
if err := m.validate(); err != nil {
return newInvalidMember(), err
}
- decodedValue, err := url.PathUnescape(value)
- if err != nil {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
- }
- m.value = decodedValue
return m, nil
}
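
The same raw-versus-encoded split applies to members, sketched below (the key and value are arbitrary examples):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewMember decodes a percent-encoded value...
	m1, _ := baggage.NewMember("user", "alice%20smith")
	fmt.Println(m1.Value()) // alice smith

	// ...while NewMemberRaw stores the raw value as-is.
	m2, _ := baggage.NewMemberRaw("user", "alice smith")
	fmt.Println(m2.Value()) // alice smith
}
```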
@@ -274,11 +278,7 @@ func parseMember(member string) (Member, error) {
return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
}
- var (
- key, value string
- props properties
- )
-
+ var props properties
keyValue, properties, found := strings.Cut(member, propertyDelimiter)
if found {
// Parse the member properties.
@@ -299,36 +299,34 @@ func parseMember(member string) (Member, error) {
}
// "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure."
- key = strings.TrimSpace(k)
- var err error
- value, err = url.PathUnescape(strings.TrimSpace(v))
- if err != nil {
- return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
- }
- if !keyRe.MatchString(key) {
+ key := strings.TrimSpace(k)
+ if !validateKey(key) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
- if !valueRe.MatchString(value) {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+
+ val := strings.TrimSpace(v)
+ if !validateValue(val) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
}
+ // Decode a percent-encoded value.
+ value, err := url.PathUnescape(val)
+ if err != nil {
+ return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err)
+ }
return Member{key: key, value: value, properties: props, hasData: true}, nil
}
// validate ensures m conforms to the W3C Baggage specification.
-// A key is just an ASCII string, but a value must be URL encoded UTF-8,
-// returning an error otherwise.
+// A key must be an ASCII string, returning an error otherwise.
func (m Member) validate() error {
if !m.hasData {
return fmt.Errorf("%w: %q", errInvalidMember, m)
}
- if !keyRe.MatchString(m.key) {
+ if !validateKey(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
}
- if !valueRe.MatchString(m.value) {
- return fmt.Errorf("%w: %q", errInvalidValue, m.value)
- }
return m.properties.validate()
}
@@ -341,11 +339,13 @@ func (m Member) Value() string { return m.value }
// Properties returns a copy of the Member properties.
func (m Member) Properties() []Property { return m.properties.Copy() }
-// String encodes Member into a string compliant with the W3C Baggage
+// String encodes Member into a header string compliant with the W3C Baggage
// specification.
func (m Member) String() string {
- // A key is just an ASCII string, but a value is URL encoded UTF-8.
- s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value))
+ // A key is just an ASCII string. A value is restricted to be
+ // US-ASCII characters excluding CTLs, whitespace,
+ // DQUOTE, comma, semicolon, and backslash.
+ s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value))
if len(m.properties) > 0 {
s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
}
@@ -536,9 +536,8 @@ func (b Baggage) Len() int {
return len(b.list)
}
-// String encodes Baggage into a string compliant with the W3C Baggage
-// specification. The returned string will be invalid if the Baggage contains
-// any invalid list-members.
+// String encodes Baggage into a header string compliant with the W3C Baggage
+// specification.
func (b Baggage) String() string {
members := make([]string, 0, len(b.list))
for k, v := range b.list {
@@ -550,3 +549,196 @@ func (b Baggage) String() string {
}
return strings.Join(members, listDelimiter)
}
+
+// parsePropertyInternal attempts to decode a Property from the passed string.
+// It follows the spec at https://www.w3.org/TR/baggage/#definition.
+func parsePropertyInternal(s string) (p Property, ok bool) {
+ // For the entire function we will use " key = value " as an example.
+ // Attempting to parse the key.
+ // First skip spaces at the beginning "< >key = value " (they could be empty).
+ index := skipSpace(s, 0)
+
+ // Parse the key: " <key> = value ".
+ keyStart := index
+ keyEnd := index
+ for _, c := range s[keyStart:] {
+ if !validateKeyChar(c) {
+ break
+ }
+ keyEnd++
+ }
+
+ // If we couldn't find any valid key character,
+ // it means the key is either empty or invalid.
+ if keyStart == keyEnd {
+ return
+ }
+
+ // Skip spaces after the key: " key< >= value ".
+ index = skipSpace(s, keyEnd)
+
+ if index == len(s) {
+ // A key can have no value, like: " key ".
+ ok = true
+ p.key = s[keyStart:keyEnd]
+ return
+ }
+
+ // If we have not reached the end and we can't find the '=' delimiter,
+ // it means the property is invalid.
+ if s[index] != keyValueDelimiter[0] {
+ return
+ }
+
+ // Attempting to parse the value.
+ // Match: " key =< >value ".
+ index = skipSpace(s, index+1)
+
+ // Match the value string: " key = <value> ".
+ // A valid property can be: " key =".
+ // Therefore, we don't have to check if the value is empty.
+ valueStart := index
+ valueEnd := index
+ for _, c := range s[valueStart:] {
+ if !validateValueChar(c) {
+ break
+ }
+ valueEnd++
+ }
+
+ // Skip all trailing whitespaces: " key = value< >".
+ index = skipSpace(s, valueEnd)
+
+ // If after looking for the value and skipping whitespaces
+ // we have not reached the end, it means the property is
+ // invalid, something like: " key = value value1".
+ if index != len(s) {
+ return
+ }
+
+ // Decode a percent-encoded value.
+ value, err := url.PathUnescape(s[valueStart:valueEnd])
+ if err != nil {
+ return
+ }
+
+ ok = true
+ p.key = s[keyStart:keyEnd]
+ p.hasValue = true
+
+ p.value = value
+ return
+}
+
+func skipSpace(s string, offset int) int {
+ i := offset
+ for ; i < len(s); i++ {
+ c := s[i]
+ if c != ' ' && c != '\t' {
+ break
+ }
+ }
+ return i
+}
+
+func validateKey(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+
+ for _, c := range s {
+ if !validateKeyChar(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func validateKeyChar(c int32) bool {
+ return (c >= 0x23 && c <= 0x27) ||
+ (c >= 0x30 && c <= 0x39) ||
+ (c >= 0x41 && c <= 0x5a) ||
+ (c >= 0x5e && c <= 0x7a) ||
+ c == 0x21 ||
+ c == 0x2a ||
+ c == 0x2b ||
+ c == 0x2d ||
+ c == 0x2e ||
+ c == 0x7c ||
+ c == 0x7e
+}
+
+func validateValue(s string) bool {
+ for _, c := range s {
+ if !validateValueChar(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func validateValueChar(c int32) bool {
+ return c == 0x21 ||
+ (c >= 0x23 && c <= 0x2b) ||
+ (c >= 0x2d && c <= 0x3a) ||
+ (c >= 0x3c && c <= 0x5b) ||
+ (c >= 0x5d && c <= 0x7e)
+}
+
+// valueEscape escapes the string so it can be safely placed inside a baggage value,
+// replacing special characters with %XX sequences as needed.
+//
+// The implementation is based on:
+// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285.
+func valueEscape(s string) string {
+ hexCount := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(c) {
+ hexCount++
+ }
+ }
+
+ if hexCount == 0 {
+ return s
+ }
+
+ var buf [64]byte
+ var t []byte
+
+ required := len(s) + 2*hexCount
+ if required <= len(buf) {
+ t = buf[:required]
+ } else {
+ t = make([]byte, required)
+ }
+
+ j := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(s[i]) {
+ const upperhex = "0123456789ABCDEF"
+ t[j] = '%'
+ t[j+1] = upperhex[c>>4]
+ t[j+2] = upperhex[c&15]
+ j += 3
+ } else {
+ t[j] = c
+ j++
+ }
+ }
+
+ return string(t)
+}
+
+// shouldEscape returns true if the specified byte should be escaped when
+// appearing in a baggage value string.
+func shouldEscape(c byte) bool {
+ if c == '%' {
+ // The percent character must be encoded so that percent-encoding can work.
+ return true
+ }
+ return !validateValueChar(int32(c))
+}
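
On the output side, `Member.String` now relies on `valueEscape` rather than `url.QueryEscape`, so only bytes outside the allowed value set are encoded. A sketch of the observable effect (the key and value are arbitrary):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	m, _ := baggage.NewMemberRaw("greeting", "hello world/100%")
	// The space and '%' must be escaped; '/' is a valid value byte and is kept.
	fmt.Println(m.String()) // greeting=hello%20world/100%25
}
```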
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
index daa36c89d..36d7c24e8 100644
--- a/vendor/go.opentelemetry.io/otel/doc.go
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -22,7 +22,7 @@ transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
exporters are used to process and transport this data.
-To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/.
+To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/.
To read more about tracing, see go.opentelemetry.io/otel/trace.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
index b46a38d60..cb41c7d58 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
@@ -104,7 +104,7 @@ func NewUnstarted(client Client) *Exporter {
}
}
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
func (e *Exporter) MarshalLog() interface{} {
return struct {
Type string
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
index 1f514ef9e..a3c2690c5 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
The value may additionally contain a port, a scheme, and a path.
The value accepts the "http" and "https" schemes.
The value should not contain a query string or fragment.
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
-The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithGRPCConn] options.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") -
setting "true" disables client transport security for the exporter's gRPC connection.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
index dddb1f334..f0203cbe7 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
@@ -20,6 +20,7 @@ package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/
import (
"crypto/tls"
"fmt"
+ "net/url"
"path"
"strings"
"time"
@@ -32,6 +33,7 @@ import (
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
)
const (
@@ -265,6 +267,24 @@ func WithEndpoint(endpoint string) GenericOption {
})
}
+func WithEndpointURL(v string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "otlptrace: parse endpoint url", "url", v)
+ return cfg
+ }
+
+ cfg.Traces.Endpoint = u.Host
+ cfg.Traces.URLPath = u.Path
+ if u.Scheme != "https" {
+ cfg.Traces.Insecure = true
+ }
+
+ return cfg
+ })
+}
+
func WithCompression(compression Compression) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Compression = compression
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
index 17ffeaf6e..461610c6b 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
@@ -64,14 +64,44 @@ func WithInsecure() Option {
return wrappedOption{otlpconfig.WithInsecure()}
}
-// WithEndpoint sets the target endpoint the exporter will connect to. If
-// unset, localhost:4317 will be used as a default.
+// WithEndpoint sets the target endpoint the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
//
// This option has no effect if WithGRPCConn is used.
func WithEndpoint(endpoint string) Option {
return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
}
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+ return wrappedOption{otlpconfig.WithEndpointURL(u)}
+}
+
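
A usage sketch for the new option on the public gRPC exporter (the collector hostname is a placeholder):

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()
	// Per the parsing logic above, a non-"https" scheme implies an
	// insecure connection.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpointURL("http://collector.example.internal:4317"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```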
// WithReconnectionPeriod set the minimum amount of time between connection
// attempts to the target endpoint.
//
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
index 068aef300..3b5f3839f 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
@@ -177,8 +177,11 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc
if _, err := io.Copy(&respData, resp.Body); err != nil {
return err
}
+ if respData.Len() == 0 {
+ return nil
+ }
- if respData.Len() != 0 {
+ if resp.Header.Get("Content-Type") == "application/x-protobuf" {
var respProto coltracepb.ExportTraceServiceResponse
if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
return err
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
index 854cc38c8..cb4f19ad1 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
@@ -26,14 +26,14 @@ The value must contain a scheme ("http" or "https") and host.
The value may additionally contain a port and a path.
The value should not contain a query string or fragment.
The configuration can be overridden by OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
-environment variable and by [WithEndpoint], [WithInsecure] options.
+environment variable and by the [WithEndpoint], [WithEndpointURL], and [WithInsecure] options.
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4318/v1/traces") -
target URL to which the exporter sends telemetry.
The value must contain a scheme ("http" or "https") and host.
The value may additionally contain a port and a path.
The value should not contain a query string or fragment.
-The configuration can be overridden by [WithEndpoint], [WitnInsecure], [WithURLPath] options.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options.
OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
key-value pairs used as headers associated with HTTP requests.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
index 8401bd7f1..3b81641a7 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
@@ -20,6 +20,7 @@ package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/
import (
"crypto/tls"
"fmt"
+ "net/url"
"path"
"strings"
"time"
@@ -32,6 +33,7 @@ import (
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
)
const (
@@ -265,6 +267,24 @@ func WithEndpoint(endpoint string) GenericOption {
})
}
+func WithEndpointURL(v string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "otlptrace: parse endpoint url", "url", v)
+ return cfg
+ }
+
+ cfg.Traces.Endpoint = u.Host
+ cfg.Traces.URLPath = u.Path
+ if u.Scheme != "https" {
+ cfg.Traces.Insecure = true
+ }
+
+ return cfg
+ })
+}
+
func WithCompression(compression Compression) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Compression = compression
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
index e3ed6494c..7b4465c4a 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
@@ -60,15 +60,44 @@ func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config
return w.ApplyHTTPOption(cfg)
}
-// WithEndpoint allows one to set the address of the collector
-// endpoint that the driver will use to send spans. If
-// unset, it will instead try to use
-// the default endpoint (localhost:4318). Note that the endpoint
-// must not contain any URL path.
+// WithEndpoint sets the target endpoint the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
func WithEndpoint(endpoint string) Option {
return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
}
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+func WithEndpointURL(u string) Option {
+ return wrappedOption{otlpconfig.WithEndpointURL(u)}
+}
+
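
The HTTP exporter gains the same option; note the URL's path becomes the upload path, so the `/v1/traces` suffix is spelled out here (the hostname is a placeholder):

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	ctx := context.Background()
	exp, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpointURL("https://collector.example.internal:4318/v1/traces"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```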
// WithCompression tells the driver to compress the sent data.
func WithCompression(compression Compression) Option {
return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
index 620ea88bf..afc89644e 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
@@ -16,5 +16,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
func Version() string {
- return "1.20.0"
+ return "1.24.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
index fe1421278..03ce27b13 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
@@ -19,18 +19,20 @@ import (
"github.com/prometheus/client_golang/prometheus"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric"
)
// config contains options for the exporter.
type config struct {
- registerer prometheus.Registerer
- disableTargetInfo bool
- withoutUnits bool
- withoutCounterSuffixes bool
- readerOpts []metric.ManualReaderOption
- disableScopeInfo bool
- namespace string
+ registerer prometheus.Registerer
+ disableTargetInfo bool
+ withoutUnits bool
+ withoutCounterSuffixes bool
+ readerOpts []metric.ManualReaderOption
+ disableScopeInfo bool
+ namespace string
+ resourceAttributesFilter attribute.Filter
}
// newConfig creates a validated config configured with options.
@@ -151,3 +153,14 @@ func WithNamespace(ns string) Option {
return cfg
})
}
+
+// WithResourceAsConstantLabels configures the Exporter to add the resource attributes the
+// resourceFilter returns true for as attributes on all exported metrics.
+//
+// This does not affect the target info generated from resource attributes.
+func WithResourceAsConstantLabels(resourceFilter attribute.Filter) Option {
+ return optionFunc(func(cfg config) config {
+ cfg.resourceAttributesFilter = resourceFilter
+ return cfg
+ })
+}
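
A sketch of wiring the new option into an exporter (the filter below is hand-written and the chosen attribute key is an arbitrary example):

```go
package main

import (
	"log"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/prometheus"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Copy service.name from the resource onto every exported metric as a
	// constant label; other resource attributes remain in target info only.
	exporter, err := prometheus.New(
		prometheus.WithResourceAsConstantLabels(func(kv attribute.KeyValue) bool {
			return kv.Key == "service.name"
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter))
}
```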
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
index 92651c38c..16df309be 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
@@ -78,14 +78,21 @@ func (e *Exporter) MarshalLog() interface{} {
var _ metric.Reader = &Exporter{}
+// keyVals is used to store resource attribute key-value pairs.
+type keyVals struct {
+ keys []string
+ vals []string
+}
+
// collector is used to implement prometheus.Collector.
type collector struct {
reader metric.Reader
- withoutUnits bool
- withoutCounterSuffixes bool
- disableScopeInfo bool
- namespace string
+ withoutUnits bool
+ withoutCounterSuffixes bool
+ disableScopeInfo bool
+ namespace string
+ resourceAttributesFilter attribute.Filter
mu sync.Mutex // mu protects all members below from the concurrent access.
disableTargetInfo bool
@@ -93,6 +100,7 @@ type collector struct {
scopeInfos map[instrumentation.Scope]prometheus.Metric
scopeInfosInvalid map[instrumentation.Scope]struct{}
metricFamilies map[string]*dto.MetricFamily
+ resourceKeyVals keyVals
}
// prometheus counters MUST have a _total suffix by default:
@@ -109,15 +117,16 @@ func New(opts ...Option) (*Exporter, error) {
reader := metric.NewManualReader(cfg.readerOpts...)
collector := &collector{
- reader: reader,
- disableTargetInfo: cfg.disableTargetInfo,
- withoutUnits: cfg.withoutUnits,
- withoutCounterSuffixes: cfg.withoutCounterSuffixes,
- disableScopeInfo: cfg.disableScopeInfo,
- scopeInfos: make(map[instrumentation.Scope]prometheus.Metric),
- scopeInfosInvalid: make(map[instrumentation.Scope]struct{}),
- metricFamilies: make(map[string]*dto.MetricFamily),
- namespace: cfg.namespace,
+ reader: reader,
+ disableTargetInfo: cfg.disableTargetInfo,
+ withoutUnits: cfg.withoutUnits,
+ withoutCounterSuffixes: cfg.withoutCounterSuffixes,
+ disableScopeInfo: cfg.disableScopeInfo,
+ scopeInfos: make(map[instrumentation.Scope]prometheus.Metric),
+ scopeInfosInvalid: make(map[instrumentation.Scope]struct{}),
+ metricFamilies: make(map[string]*dto.MetricFamily),
+ namespace: cfg.namespace,
+ resourceAttributesFilter: cfg.resourceAttributesFilter,
}
if err := cfg.registerer.Register(collector); err != nil {
@@ -181,6 +190,10 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
ch <- c.targetInfo
}
+ if c.resourceAttributesFilter != nil && len(c.resourceKeyVals.keys) == 0 {
+ c.createResourceAttributes(metrics.Resource)
+ }
+
for _, scopeMetrics := range metrics.ScopeMetrics {
var keys, values [2]string
@@ -219,26 +232,26 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
switch v := m.Data.(type) {
case metricdata.Histogram[int64]:
- addHistogramMetric(ch, v, m, keys, values, name)
+ addHistogramMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
case metricdata.Histogram[float64]:
- addHistogramMetric(ch, v, m, keys, values, name)
+ addHistogramMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
case metricdata.Sum[int64]:
- addSumMetric(ch, v, m, keys, values, name)
+ addSumMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
case metricdata.Sum[float64]:
- addSumMetric(ch, v, m, keys, values, name)
+ addSumMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
case metricdata.Gauge[int64]:
- addGaugeMetric(ch, v, m, keys, values, name)
+ addGaugeMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
case metricdata.Gauge[float64]:
- addGaugeMetric(ch, v, m, keys, values, name)
+ addGaugeMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
}
}
}
}
-func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, ks, vs [2]string, name string) {
+func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) {
// TODO(https://github.com/open-telemetry/opentelemetry-go/issues/3163): support exemplars
for _, dp := range histogram.DataPoints {
- keys, values := getAttrs(dp.Attributes, ks, vs)
+ keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV)
desc := prometheus.NewDesc(name, m.Description, keys, nil)
buckets := make(map[float64]uint64, len(dp.Bounds))
@@ -257,14 +270,14 @@ func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogra
}
}
-func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, ks, vs [2]string, name string) {
+func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) {
valueType := prometheus.CounterValue
if !sum.IsMonotonic {
valueType = prometheus.GaugeValue
}
for _, dp := range sum.DataPoints {
- keys, values := getAttrs(dp.Attributes, ks, vs)
+ keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV)
desc := prometheus.NewDesc(name, m.Description, keys, nil)
m, err := prometheus.NewConstMetric(desc, valueType, float64(dp.Value), values...)
@@ -276,9 +289,9 @@ func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata
}
}
-func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, ks, vs [2]string, name string) {
+func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) {
for _, dp := range gauge.DataPoints {
- keys, values := getAttrs(dp.Attributes, ks, vs)
+ keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV)
desc := prometheus.NewDesc(name, m.Description, keys, nil)
m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(dp.Value), values...)
@@ -293,7 +306,7 @@ func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metric
// getAttrs parses the attribute.Set into two lists of matching Prometheus-style
// keys and values. It sanitizes invalid characters and handles duplicate keys
// (due to sanitization) by sorting and concatenating the values following the spec.
-func getAttrs(attrs attribute.Set, ks, vs [2]string) ([]string, []string) {
+func getAttrs(attrs attribute.Set, ks, vs [2]string, resourceKV keyVals) ([]string, []string) {
keysMap := make(map[string][]string)
itr := attrs.Iter()
for itr.Next() {
@@ -321,11 +334,17 @@ func getAttrs(attrs attribute.Set, ks, vs [2]string) ([]string, []string) {
keys = append(keys, ks[:]...)
values = append(values, vs[:]...)
}
+
+ for idx := range resourceKV.keys {
+ keys = append(keys, resourceKV.keys[idx])
+ values = append(values, resourceKV.vals[idx])
+ }
+
return keys, values
}
func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) {
- keys, values := getAttrs(*res.Set(), [2]string{}, [2]string{})
+ keys, values := getAttrs(*res.Set(), [2]string{}, [2]string{}, keyVals{})
desc := prometheus.NewDesc(name, description, keys, nil)
return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
}
@@ -473,6 +492,15 @@ func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType {
return nil
}
+func (c *collector) createResourceAttributes(res *resource.Resource) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter)
+ resourceKeys, resourceValues := getAttrs(resourceAttrs, [2]string{}, [2]string{}, keyVals{})
+ c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues}
+}
+
func (c *collector) scopeInfo(scope instrumentation.Scope) (prometheus.Metric, error) {
c.mu.Lock()
defer c.mu.Unlock()
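The new resourceAttributesFilter field is what lets resource attributes be copied onto every exported metric as constant labels. A minimal consumer-side sketch, assuming the exporter's WithResourceAsConstantLabels option and the attribute package's allow-keys filter (neither shown in this hunk):

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/prometheus"
)

// Only service.name and service.version from the resource become labels on
// each exported metric family; all other resource attributes are dropped.
exporter, err := prometheus.New(
	prometheus.WithResourceAsConstantLabels(
		attribute.NewAllowKeysFilter("service.name", "service.version"),
	),
)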
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
index 0097db478..7ed61c0e2 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -130,9 +130,11 @@ func (m *meter) setDelegate(provider metric.MeterProvider) {
inst.setDelegate(meter)
}
- for e := m.registry.Front(); e != nil; e = e.Next() {
+ var n *list.Element
+ for e := m.registry.Front(); e != nil; e = n {
r := e.Value.(*registration)
r.setDelegate(meter)
+ n = e.Next()
m.registry.Remove(e)
}
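This loop change fixes removal-during-iteration: container/list's Remove clears the element's next pointer, so e.Next() must be captured before the removal or the loop would stop after the first registration. The pattern in isolation:

package main

import (
	"container/list"
	"fmt"
)

func main() {
	l := list.New()
	for _, v := range []int{1, 2, 3} {
		l.PushBack(v)
	}
	var n *list.Element
	for e := l.Front(); e != nil; e = n {
		n = e.Next() // capture before Remove zeroes the link
		fmt.Println(e.Value)
		l.Remove(e)
	}
}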
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
index 7985005bc..386c8bfdc 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/state.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -63,7 +63,7 @@ func SetTracerProvider(tp trace.TracerProvider) {
// to itself.
Error(
errors.New("no delegate configured in tracer provider"),
- "Setting tracer provider to it's current value. No delegate will be configured",
+ "Setting tracer provider to its current value. No delegate will be configured",
)
return
}
@@ -92,7 +92,7 @@ func SetTextMapPropagator(p propagation.TextMapPropagator) {
// delegate to itself.
Error(
errors.New("no delegate configured in text map propagator"),
- "Setting text map propagator to it's current value. No delegate will be configured",
+ "Setting text map propagator to its current value. No delegate will be configured",
)
return
}
@@ -123,7 +123,7 @@ func SetMeterProvider(mp metric.MeterProvider) {
// to itself.
Error(
errors.New("no delegate configured in meter provider"),
- "Setting meter provider to it's current value. No delegate will be configured",
+ "Setting meter provider to its current value. No delegate will be configured",
)
return
}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
index 75a8f3435..63e5d6222 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -18,7 +18,7 @@ import (
"context"
"encoding/hex"
"fmt"
- "regexp"
+ "strings"
"go.opentelemetry.io/otel/trace"
)
@@ -28,6 +28,7 @@ const (
maxVersion = 254
traceparentHeader = "traceparent"
tracestateHeader = "tracestate"
+ delimiter = "-"
)
// TraceContext is a propagator that supports the W3C Trace Context format
@@ -41,8 +42,8 @@ const (
type TraceContext struct{}
var (
- _ TextMapPropagator = TraceContext{}
- traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
+ _ TextMapPropagator = TraceContext{}
+ versionPart = fmt.Sprintf("%.2X", supportedVersion)
)
// Inject set tracecontext from the Context into the carrier.
@@ -59,12 +60,19 @@ func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
// Clear all flags other than the trace-context supported sampling bit.
flags := sc.TraceFlags() & trace.FlagsSampled
- h := fmt.Sprintf("%.2x-%s-%s-%s",
- supportedVersion,
- sc.TraceID(),
- sc.SpanID(),
- flags)
- carrier.Set(traceparentHeader, h)
+ var sb strings.Builder
+ sb.Grow(2 + 32 + 16 + 2 + 3)
+ _, _ = sb.WriteString(versionPart)
+ traceID := sc.TraceID()
+ spanID := sc.SpanID()
+ flagByte := [1]byte{byte(flags)}
+ var buf [32]byte
+ for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+ _ = sb.WriteByte(delimiter[0])
+ n := hex.Encode(buf[:], src)
+ _, _ = sb.Write(buf[:n])
+ }
+ carrier.Set(traceparentHeader, sb.String())
}
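The builder writes the same W3C traceparent layout the old fmt.Sprintf call produced, with a single Grow covering the version, trace-id, span-id, flags, and the three delimiters. For reference, a sampled span yields a header of this shape (IDs taken from the W3C Trace Context examples):

// version (2 hex) - trace-id (32 hex) - parent-id (16 hex) - flags (2 hex)
const traceparent = "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"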
// Extract reads tracecontext from the carrier into a returned Context.
@@ -86,21 +94,8 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
return trace.SpanContext{}
}
- matches := traceCtxRegExp.FindStringSubmatch(h)
-
- if len(matches) == 0 {
- return trace.SpanContext{}
- }
-
- if len(matches) < 5 { // four subgroups plus the overall match
- return trace.SpanContext{}
- }
-
- if len(matches[1]) != 2 {
- return trace.SpanContext{}
- }
- ver, err := hex.DecodeString(matches[1])
- if err != nil {
+ var ver [1]byte
+ if !extractPart(ver[:], &h, 2) {
return trace.SpanContext{}
}
version := int(ver[0])
@@ -108,36 +103,24 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
return trace.SpanContext{}
}
- if version == 0 && len(matches) != 5 { // four subgroups plus the overall match
- return trace.SpanContext{}
- }
-
- if len(matches[2]) != 32 {
- return trace.SpanContext{}
- }
-
var scc trace.SpanContextConfig
-
- scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32])
- if err != nil {
+ if !extractPart(scc.TraceID[:], &h, 32) {
return trace.SpanContext{}
}
-
- if len(matches[3]) != 16 {
- return trace.SpanContext{}
- }
- scc.SpanID, err = trace.SpanIDFromHex(matches[3])
- if err != nil {
+ if !extractPart(scc.SpanID[:], &h, 16) {
return trace.SpanContext{}
}
- if len(matches[4]) != 2 {
+ var opts [1]byte
+ if !extractPart(opts[:], &h, 2) {
return trace.SpanContext{}
}
- opts, err := hex.DecodeString(matches[4])
- if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) {
+ if version == 0 && (h != "" || opts[0] > 2) {
+ // version 0 does not allow trailing data after the flags,
+ // and does not allow flag values beyond the defined ones
return trace.SpanContext{}
}
+
// Clear all flags other than the trace-context supported sampling bit.
scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
@@ -155,6 +138,29 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
return sc
}
+// upperHex reports whether v contains upper-case hexadecimal characters.
+func upperHex(v string) bool {
+ for _, c := range v {
+ if c >= 'A' && c <= 'F' {
+ return true
+ }
+ }
+ return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+ part, left, _ := strings.Cut(*h, delimiter)
+ *h = left
+ // hex.Decode accepts upper-case characters, which the spec does not allow, so exclude them explicitly.
+ if len(part) != n || upperHex(part) {
+ return false
+ }
+ if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+ return false
+ }
+ return true
+}
+
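extractPart is what replaces the regular expression: each call cuts one dash-delimited field off the front of the header, rejects wrong lengths and upper-case input, and hex-decodes in place. A short sketch of how extract consumes a valid header:

h := "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"
var ver [1]byte
_ = extractPart(ver[:], &h, 2) // ver[0] == 0x00; h now starts at the trace-id
var traceID [16]byte
_ = extractPart(traceID[:], &h, 32) // 32 hex characters decode into 16 bytes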
// Fields returns the keys whose values are set with Inject.
func (tc TraceContext) Fields() []string {
return []string{traceparentHeader, tracestateHeader}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
index de9d6f001..e9c0b38d0 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
@@ -52,3 +52,43 @@ func (c *cache[K, V]) Lookup(key K, f func() V) V {
c.data[key] = val
return val
}
+
+// HasKey returns true if Lookup has previously been called with that key.
+//
+// HasKey is safe to call concurrently.
+func (c *cache[K, V]) HasKey(key K) bool {
+ c.Lock()
+ defer c.Unlock()
+ _, ok := c.data[key]
+ return ok
+}
+
+// cacheWithErr is a locking storage used to quickly return already computed values and an error.
+//
+// The zero value of a cacheWithErr is empty and ready to use.
+//
+// A cacheWithErr must not be copied after first use.
+//
+// All methods of a cacheWithErr are safe to call concurrently.
+type cacheWithErr[K comparable, V any] struct {
+ cache[K, valAndErr[V]]
+}
+
+type valAndErr[V any] struct {
+ val V
+ err error
+}
+
+// Lookup returns the value stored in the cacheWithErr with the associated key
+// if it exists. Otherwise, f is called and its returned value is set in the
+// cacheWithErr for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cacheWithErr lock, so f
+// should not block excessively.
+func (c *cacheWithErr[K, V]) Lookup(key K, f func() (V, error)) (V, error) {
+ combined := c.cache.Lookup(key, func() valAndErr[V] {
+ val, err := f()
+ return valAndErr[V]{val: val, err: err}
+ })
+ return combined.val, combined.err
+}
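A hypothetical use of cacheWithErr, showing that the value and the error are memoized together, so f runs at most once per key even under concurrent Lookup calls:

var c cacheWithErr[string, int]
v, err := c.Lookup("answer", func() (int, error) {
	return 42, nil // computed only on the first Lookup for "answer"
})
_, _ = v, err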
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
index 53f80c428..475d3e394 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
@@ -44,4 +44,7 @@
//
// See [go.opentelemetry.io/otel/metric] for more information about
// the metric API.
+//
+// See [go.opentelemetry.io/otel/sdk/metric/internal/x] for information about
+// the experimental features.
package metric // import "go.opentelemetry.io/otel/sdk/metric"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
new file mode 100644
index 000000000..3f1ce9f1d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "os"
+ "runtime"
+
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/internal/x"
+)
+
+// reservoirFunc returns the appropriately configured exemplar reservoir
+// creation func based on the passed Aggregation and user-defined
+// environment variables.
+//
+// Note: This will only return a non-nil value when the experimental exemplar
+// feature is enabled; when OTEL_METRICS_EXEMPLAR_FILTER is set to always_off
+// the returned factory produces reservoirs that drop every offer.
+func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.Reservoir[N] {
+ if !x.Exemplars.Enabled() {
+ return nil
+ }
+
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
+ resF := func() func() exemplar.Reservoir[N] {
+ // Explicit bucket histogram aggregation with more than 1 bucket will
+ // use AlignedHistogramBucketExemplarReservoir.
+ a, ok := agg.(AggregationExplicitBucketHistogram)
+ if ok && len(a.Boundaries) > 0 {
+ cp := make([]float64, len(a.Boundaries))
+ copy(cp, a.Boundaries)
+ return func() exemplar.Reservoir[N] {
+ bounds := cp
+ return exemplar.Histogram[N](bounds)
+ }
+ }
+
+ var n int
+ if a, ok := agg.(AggregationBase2ExponentialHistogram); ok {
+ // Base2 Exponential Histogram Aggregation SHOULD use a
+ // SimpleFixedSizeExemplarReservoir with a reservoir equal to the
+ // smaller of the maximum number of buckets configured on the
+ // aggregation or twenty (e.g. min(20, max_buckets)).
+ n = int(a.MaxSize)
+ if n > 20 {
+ n = 20
+ }
+ } else {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
+ // This Exemplar reservoir MAY take a configuration parameter for
+ // the size of the reservoir. If no size configuration is
+ // provided, the default size MAY be the number of possible
+ // concurrent threads (e.g. number of CPUs) to help reduce
+ // contention. Otherwise, a default size of 1 SHOULD be used.
+ n = runtime.NumCPU()
+ if n < 1 {
+ // Should never be the case, but be defensive.
+ n = 1
+ }
+ }
+
+ return func() exemplar.Reservoir[N] {
+ return exemplar.FixedSize[N](n)
+ }
+ }
+
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
+ const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
+
+ switch os.Getenv(filterEnvKey) {
+ case "always_on":
+ return resF()
+ case "always_off":
+ return exemplar.Drop[N]
+ case "trace_based":
+ fallthrough
+ default:
+ newR := resF()
+ return func() exemplar.Reservoir[N] {
+ return exemplar.SampledFilter(newR())
+ }
+ }
+}
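A minimal sketch of driving this selection from the environment; the gate name OTEL_GO_X_EXEMPLAR is an assumption here (it lives in the internal x package, not in this hunk):

// Enable the experimental feature, then pick a filter. trace_based, the
// default, wraps the reservoir in SampledFilter, so exemplars are only
// recorded for measurements made under a sampled span.
os.Setenv("OTEL_GO_X_EXEMPLAR", "true")
os.Setenv("OTEL_METRICS_EXEMPLAR_FILTER", "trace_based") // or always_on / always_off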
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
index bb52f6ec7..a4cfcbb95 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
@@ -205,9 +205,6 @@ func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.Record
}
func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method.
- if err := ctx.Err(); err != nil {
- return
- }
for _, in := range i.measures {
in(ctx, val, s)
}
@@ -238,9 +235,6 @@ func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.Re
}
func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) {
- if err := ctx.Err(); err != nil {
- return
- }
for _, in := range i.measures {
in(ctx, val, s)
}
@@ -270,9 +264,9 @@ var (
_ metric.Float64ObservableGauge = float64Observable{}
)
-func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[float64]) float64Observable {
+func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string) float64Observable {
return float64Observable{
- observable: newObservable(m, kind, name, desc, u, meas),
+ observable: newObservable[float64](m, kind, name, desc, u),
}
}
@@ -291,9 +285,9 @@ var (
_ metric.Int64ObservableGauge = int64Observable{}
)
-func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[int64]) int64Observable {
+func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int64Observable {
return int64Observable{
- observable: newObservable(m, kind, name, desc, u, meas),
+ observable: newObservable[int64](m, kind, name, desc, u),
}
}
@@ -301,11 +295,12 @@ type observable[N int64 | float64] struct {
metric.Observable
observablID[N]
- meter *meter
- measures []aggregate.Measure[N]
+ meter *meter
+ measures measures[N]
+ dropAggregation bool
}
-func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[N]) *observable[N] {
+func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] {
return &observable[N]{
observablID: observablID[N]{
name: name,
@@ -314,14 +309,24 @@ func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc,
unit: u,
scope: m.scope,
},
- meter: m,
- measures: meas,
+ meter: m,
}
}
// observe records the val for the set of attrs.
func (o *observable[N]) observe(val N, s attribute.Set) {
- for _, in := range o.measures {
+ o.measures.observe(val, s)
+}
+
+func (o *observable[N]) appendMeasures(meas []aggregate.Measure[N]) {
+ o.measures = append(o.measures, meas...)
+}
+
+type measures[N int64 | float64] []aggregate.Measure[N]
+
+// observe records the val for the set of attrs.
+func (m measures[N]) observe(val N, s attribute.Set) {
+ for _, in := range m {
in(context.Background(), val, s)
}
}
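The refactor decouples construction from measurement: an observable now starts with no measures, pipelines attach them later through appendMeasures, and observe simply fans out. A test-style sketch of the fan-out (the inline measure here is hypothetical):

var ms measures[int64]
ms = append(ms, func(_ context.Context, v int64, _ attribute.Set) {
	fmt.Println("measure saw", v)
})
ms.observe(7, *attribute.EmptySet()) // prints: measure saw 7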
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
index 8dec14237..4060a2f76 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
@@ -19,6 +19,7 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
@@ -44,17 +45,43 @@ type Builder[N int64 | float64] struct {
// Filter is the attribute filter the aggregate function will use on the
// input of measurements.
Filter attribute.Filter
+ // ReservoirFunc is the factory function used by aggregate functions to
+ // create new exemplar reservoirs for newly seen attribute sets.
+ //
+ // If this is not provided a default factory function that returns an
+ // exemplar.Drop reservoir will be used.
+ ReservoirFunc func() exemplar.Reservoir[N]
+ // AggregationLimit is the cardinality limit of measurement attributes. Once
+ // the limit has been reached, any measurement made with a not-yet-seen
+ // attribute set will be aggregated into a single aggregate for the
+ // "otel.metric.overflow" attribute.
+ //
+ // If AggregationLimit is less than or equal to zero there will not be an
+ // aggregation limit imposed (i.e. unlimited attribute sets).
+ AggregationLimit int
+}
+
+func (b Builder[N]) resFunc() func() exemplar.Reservoir[N] {
+ if b.ReservoirFunc != nil {
+ return b.ReservoirFunc
+ }
+
+ return exemplar.Drop[N]
}
-func (b Builder[N]) filter(f Measure[N]) Measure[N] {
+type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)
+
+func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] {
if b.Filter != nil {
fltr := b.Filter // Copy to make it immutable after assignment.
return func(ctx context.Context, n N, a attribute.Set) {
- fAttr, _ := a.Filter(fltr)
- f(ctx, n, fAttr)
+ fAttr, dropped := a.Filter(fltr)
+ f(ctx, n, fAttr, dropped)
}
}
- return f
+ return func(ctx context.Context, n N, a attribute.Set) {
+ f(ctx, n, a, nil)
+ }
}
// LastValue returns a last-value aggregate function input and output.
@@ -63,7 +90,7 @@ func (b Builder[N]) filter(f Measure[N]) Measure[N] {
func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
// Delta temporality is the only temporality that makes semantic sense for
// a last-value aggregate.
- lv := newLastValue[N]()
+ lv := newLastValue[N](b.AggregationLimit, b.resFunc())
return b.filter(lv.measure), func(dest *metricdata.Aggregation) int {
// Ignore if dest is not a metricdata.Gauge. The chance for memory
@@ -79,7 +106,7 @@ func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
// PrecomputedSum returns a sum aggregate function input and output. The
// arguments passed to the input are expected to be the precomputed sum values.
func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) {
- s := newPrecomputedSum[N](monotonic)
+ s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
return b.filter(s.measure), s.delta
@@ -90,7 +117,7 @@ func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregati
// Sum returns a sum aggregate function input and output.
func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
- s := newSum[N](monotonic)
+ s := newSum[N](monotonic, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
return b.filter(s.measure), s.delta
@@ -102,7 +129,7 @@ func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
// ExplicitBucketHistogram returns a histogram aggregate function input and
// output.
func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
- h := newHistogram[N](boundaries, noMinMax, noSum)
+ h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
return b.filter(h.measure), h.delta
@@ -114,7 +141,7 @@ func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSu
// ExponentialBucketHistogram returns a histogram aggregate function input and
// output.
func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
- h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum)
+ h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
return b.filter(h.measure), h.delta
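Builder gains two knobs in this change: an exemplar reservoir factory and a cardinality limit. A hypothetical wiring of both for a monotonic sum:

b := Builder[float64]{
	Temporality:      metricdata.CumulativeTemporality,
	AggregationLimit: 2000, // 1999 distinct streams plus one overflow stream
	ReservoirFunc: func() exemplar.Reservoir[float64] {
		return exemplar.FixedSize[float64](4)
	},
}
measure, collect := b.Sum(true) // monotonic
_, _ = measure, collect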
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
index 98b7dc1e0..4139a6d15 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -23,6 +23,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
@@ -40,6 +41,8 @@ const (
// expoHistogramDataPoint is a single data point in an exponential histogram.
type expoHistogramDataPoint[N int64 | float64] struct {
+ res exemplar.Reservoir[N]
+
count uint64
min N
max N
@@ -288,13 +291,15 @@ func (b *expoBuckets) downscale(delta int) {
// newExponentialHistogram returns an Aggregator that summarizes a set of
// measurements as an exponential histogram. Each histogram is scoped by attributes
// and the aggregation cycle the measurements were made in.
-func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool) *expoHistogram[N] {
+func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *expoHistogram[N] {
return &expoHistogram[N]{
noSum: noSum,
noMinMax: noMinMax,
maxSize: int(maxSize),
maxScale: int(maxScale),
+ newRes: r,
+ limit: newLimiter[*expoHistogramDataPoint[N]](limit),
values: make(map[attribute.Set]*expoHistogramDataPoint[N]),
start: now(),
@@ -309,27 +314,35 @@ type expoHistogram[N int64 | float64] struct {
maxSize int
maxScale int
+ newRes func() exemplar.Reservoir[N]
+ limit limiter[*expoHistogramDataPoint[N]]
values map[attribute.Set]*expoHistogramDataPoint[N]
valuesMu sync.Mutex
start time.Time
}
-func (e *expoHistogram[N]) measure(_ context.Context, value N, attr attribute.Set) {
+func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
// Ignore NaN and infinity.
if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) {
return
}
+ t := now()
+
e.valuesMu.Lock()
defer e.valuesMu.Unlock()
+ attr := e.limit.Attributes(fltrAttr, e.values)
v, ok := e.values[attr]
if !ok {
v = newExpoHistogramDataPoint[N](e.maxSize, e.maxScale, e.noMinMax, e.noSum)
+ v.res = e.newRes()
+
e.values[attr] = v
}
v.record(value)
+ v.res.Offer(ctx, t, value, droppedAttr)
}
func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
@@ -362,6 +375,7 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin)
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts))
+ copy(hDPts[i].NegativeBucket.Counts, b.negBuckets.counts)
if !e.noSum {
hDPts[i].Sum = b.sum
@@ -371,6 +385,8 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
hDPts[i].Max = metricdata.NewExtrema(b.max)
}
+ b.res.Collect(&hDPts[i].Exemplars)
+
delete(e.values, a)
i++
}
@@ -410,6 +426,7 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin)
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts))
+ copy(hDPts[i].NegativeBucket.Counts, b.negBuckets.counts)
if !e.noSum {
hDPts[i].Sum = b.sum
@@ -419,6 +436,8 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
hDPts[i].Max = metricdata.NewExtrema(b.max)
}
+ b.res.Collect(&hDPts[i].Exemplars)
+
i++
// TODO (#3006): This will use an unbounded amount of memory if there
// are unbounded number of attribute sets being aggregated. Attribute
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
index 62ec51e1f..a9a4706bf 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
@@ -21,10 +21,13 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
type buckets[N int64 | float64] struct {
+ res exemplar.Reservoir[N]
+
counts []uint64
count uint64
total N
@@ -54,11 +57,13 @@ type histValues[N int64 | float64] struct {
noSum bool
bounds []float64
+ newRes func() exemplar.Reservoir[N]
+ limit limiter[*buckets[N]]
values map[attribute.Set]*buckets[N]
valuesMu sync.Mutex
}
-func newHistValues[N int64 | float64](bounds []float64, noSum bool) *histValues[N] {
+func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histValues[N] {
// The responsibility of keeping all buckets correctly associated with the
// passed boundaries is ultimately this type's responsibility. Make a copy
// here so we can always guarantee this. Or, in the case of failure, have
@@ -69,13 +74,15 @@ func newHistValues[N int64 | float64](bounds []float64, noSum bool) *histValues[
return &histValues[N]{
noSum: noSum,
bounds: b,
+ newRes: r,
+ limit: newLimiter[*buckets[N]](limit),
values: make(map[attribute.Set]*buckets[N]),
}
}
// Aggregate records the measurement value, scoped by attr, and aggregates it
// into a histogram.
-func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set) {
+func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
// This search will return an index in the range [0, len(s.bounds)], where
// it will return len(s.bounds) if value is greater than the last element
// of s.bounds. This aligns with the buckets in that the length of buckets
@@ -83,9 +90,12 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set)
// (s.bounds[len(s.bounds)-1], +∞).
idx := sort.SearchFloat64s(s.bounds, float64(value))
+ t := now()
+
s.valuesMu.Lock()
defer s.valuesMu.Unlock()
+ attr := s.limit.Attributes(fltrAttr, s.values)
b, ok := s.values[attr]
if !ok {
// N+1 buckets. For example:
@@ -96,6 +106,8 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set)
//
// buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
b = newBuckets[N](len(s.bounds) + 1)
+ b.res = s.newRes()
+
// Ensure min and max are recorded values (not zero), for new buckets.
b.min, b.max = value, value
s.values[attr] = b
@@ -104,13 +116,14 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set)
if !s.noSum {
b.sum(value)
}
+ b.res.Offer(ctx, t, value, droppedAttr)
}
// newHistogram returns an Aggregator that summarizes a set of measurements as
// an histogram.
-func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool) *histogram[N] {
+func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histogram[N] {
return &histogram[N]{
- histValues: newHistValues[N](boundaries, noSum),
+ histValues: newHistValues[N](boundaries, noSum, limit, r),
noMinMax: noMinMax,
start: now(),
}
@@ -161,6 +174,8 @@ func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
hDPts[i].Max = metricdata.NewExtrema(b.max)
}
+ b.res.Collect(&hDPts[i].Exemplars)
+
// Unused attribute sets do not report.
delete(s.values, a)
i++
@@ -217,6 +232,9 @@ func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int {
hDPts[i].Min = metricdata.NewExtrema(b.min)
hDPts[i].Max = metricdata.NewExtrema(b.max)
}
+
+ b.res.Collect(&hDPts[i].Exemplars)
+
i++
// TODO (#3006): This will use an unbounded amount of memory if there
// are unbounded number of attribute sets being aggregated. Attribute
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
index 6af2d6061..5699e728f 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
@@ -20,6 +20,7 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
@@ -27,24 +28,43 @@ import (
type datapoint[N int64 | float64] struct {
timestamp time.Time
value N
+ res exemplar.Reservoir[N]
}
-func newLastValue[N int64 | float64]() *lastValue[N] {
- return &lastValue[N]{values: make(map[attribute.Set]datapoint[N])}
+func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *lastValue[N] {
+ return &lastValue[N]{
+ newRes: r,
+ limit: newLimiter[datapoint[N]](limit),
+ values: make(map[attribute.Set]datapoint[N]),
+ }
}
// lastValue summarizes a set of measurements as the last one made.
type lastValue[N int64 | float64] struct {
sync.Mutex
+ newRes func() exemplar.Reservoir[N]
+ limit limiter[datapoint[N]]
values map[attribute.Set]datapoint[N]
}
-func (s *lastValue[N]) measure(ctx context.Context, value N, attr attribute.Set) {
- d := datapoint[N]{timestamp: now(), value: value}
+func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ t := now()
+
s.Lock()
+ defer s.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ d, ok := s.values[attr]
+ if !ok {
+ d.res = s.newRes()
+ }
+
+ d.timestamp = t
+ d.value = value
+ d.res.Offer(ctx, t, value, droppedAttr)
+
s.values[attr] = d
- s.Unlock()
}
func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) {
@@ -61,6 +81,7 @@ func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) {
// ignored.
(*dest)[i].Time = v.timestamp
(*dest)[i].Value = v.value
+ v.res.Collect(&(*dest)[i].Exemplars)
// Do not report stale values.
delete(s.values, a)
i++
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
new file mode 100644
index 000000000..d3de84272
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// overflowSet is the attribute set used to record a measurement when adding
+// another distinct attribute set to the aggregate would exceed the aggregate
+// limit.
+var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))
+
+// limiter limits aggregate values.
+type limiter[V any] struct {
+ // aggLimit is the maximum number of metric streams that can be aggregated.
+ //
+ // Once the aggLimit has been met, any metric stream with attributes
+ // distinct from the sets already aggregated will instead be aggregated
+ // into an "overflow" metric stream. That stream will only contain the
+ // "otel.metric.overflow"=true attribute.
+ aggLimit int
+}
+
+// newLimiter returns a new limiter with the provided aggregation limit.
+func newLimiter[V any](aggregation int) limiter[V] {
+ return limiter[V]{aggLimit: aggregation}
+}
+
+// Attributes checks if adding a measurement for attrs will exceed the
+// aggregation cardinality limit for the existing measurements. If it will,
+// overflowSet is returned. Otherwise, if it will not exceed the limit, or the
+// limit is not set (limit <= 0), attrs is returned.
+func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Set]V) attribute.Set {
+ if l.aggLimit > 0 {
+ _, exists := measurements[attrs]
+ if !exists && len(measurements) >= l.aggLimit-1 {
+ return overflowSet
+ }
+ }
+
+ return attrs
+}
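A test-style walk-through of Attributes with aggLimit = 3: two distinct sets are stored under their own keys, and the third lands in the overflow set, keeping the total number of streams at the limit:

lim := newLimiter[int](3)
values := map[attribute.Set]int{}
for _, v := range []string{"a", "b", "c"} {
	s := attribute.NewSet(attribute.String("key", v))
	values[lim.Attributes(s, values)]++
}
// "a" and "b" are stored as-is; "c" is recorded under
// {"otel.metric.overflow": true}, so len(values) == 3.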
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
index 1e52ff0d1..02de2483f 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
@@ -20,31 +20,55 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
+type sumValue[N int64 | float64] struct {
+ n N
+ res exemplar.Reservoir[N]
+}
+
// valueMap is the storage for sums.
type valueMap[N int64 | float64] struct {
sync.Mutex
- values map[attribute.Set]N
+ newRes func() exemplar.Reservoir[N]
+ limit limiter[sumValue[N]]
+ values map[attribute.Set]sumValue[N]
}
-func newValueMap[N int64 | float64]() *valueMap[N] {
- return &valueMap[N]{values: make(map[attribute.Set]N)}
+func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *valueMap[N] {
+ return &valueMap[N]{
+ newRes: r,
+ limit: newLimiter[sumValue[N]](limit),
+ values: make(map[attribute.Set]sumValue[N]),
+ }
}
-func (s *valueMap[N]) measure(_ context.Context, value N, attr attribute.Set) {
+func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ t := now()
+
s.Lock()
- s.values[attr] += value
- s.Unlock()
+ defer s.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ v, ok := s.values[attr]
+ if !ok {
+ v.res = s.newRes()
+ }
+
+ v.n += value
+ v.res.Offer(ctx, t, value, droppedAttr)
+
+ s.values[attr] = v
}
// newSum returns an aggregator that summarizes a set of measurements as their
// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
// the measurements were made in.
-func newSum[N int64 | float64](monotonic bool) *sum[N] {
+func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *sum[N] {
return &sum[N]{
- valueMap: newValueMap[N](),
+ valueMap: newValueMap[N](limit, r),
monotonic: monotonic,
start: now(),
}
@@ -74,11 +98,12 @@ func (s *sum[N]) delta(dest *metricdata.Aggregation) int {
dPts := reset(sData.DataPoints, n, n)
var i int
- for attr, value := range s.values {
+ for attr, val := range s.values {
dPts[i].Attributes = attr
dPts[i].StartTime = s.start
dPts[i].Time = t
- dPts[i].Value = value
+ dPts[i].Value = val.n
+ val.res.Collect(&dPts[i].Exemplars)
// Do not report stale values.
delete(s.values, attr)
i++
@@ -112,7 +137,8 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
dPts[i].Attributes = attr
dPts[i].StartTime = s.start
dPts[i].Time = t
- dPts[i].Value = value
+ dPts[i].Value = value.n
+ value.res.Collect(&dPts[i].Exemplars)
// TODO (#3006): This will use an unbounded amount of memory if there
// are unbounded number of attribute sets being aggregated. Attribute
// sets that become "stale" need to be forgotten so this will not
@@ -129,9 +155,9 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
// newPrecomputedSum returns an aggregator that summarizes a set of
// observations as their arithmetic sum. Each sum is scoped by attributes and
// the aggregation cycle the measurements were made in.
-func newPrecomputedSum[N int64 | float64](monotonic bool) *precomputedSum[N] {
+func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *precomputedSum[N] {
return &precomputedSum[N]{
- valueMap: newValueMap[N](),
+ valueMap: newValueMap[N](limit, r),
monotonic: monotonic,
start: now(),
}
@@ -165,14 +191,15 @@ func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int {
var i int
for attr, value := range s.values {
- delta := value - s.reported[attr]
+ delta := value.n - s.reported[attr]
dPts[i].Attributes = attr
dPts[i].StartTime = s.start
dPts[i].Time = t
dPts[i].Value = delta
+ value.res.Collect(&dPts[i].Exemplars)
- newReported[attr] = value
+ newReported[attr] = value.n
// Unused attribute sets do not report.
delete(s.values, attr)
i++
@@ -204,11 +231,12 @@ func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int {
dPts := reset(sData.DataPoints, n, n)
var i int
- for attr, value := range s.values {
+ for attr, val := range s.values {
dPts[i].Attributes = attr
dPts[i].StartTime = s.start
dPts[i].Time = t
- dPts[i].Value = value
+ dPts[i].Value = val.n
+ val.res.Collect(&dPts[i].Exemplars)
// Unused attribute sets do not report.
delete(s.values, attr)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go
new file mode 100644
index 000000000..3caeb542c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package exemplar provides an implementation of the OpenTelemetry exemplar
+// reservoir to be used in metric collection pipelines.
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go
new file mode 100644
index 000000000..39bf37b9e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// Drop returns a [Reservoir] that drops all measurements it is offered.
+func Drop[N int64 | float64]() Reservoir[N] { return &dropRes[N]{} }
+
+type dropRes[N int64 | float64] struct{}
+
+// Offer does nothing, all measurements offered will be dropped.
+func (r *dropRes[N]) Offer(context.Context, time.Time, N, []attribute.KeyValue) {}
+
+// Collect resets dest. No exemplars will ever be returned.
+func (r *dropRes[N]) Collect(dest *[]metricdata.Exemplar[N]) {
+ *dest = (*dest)[:0]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go
new file mode 100644
index 000000000..4f5946fb9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// SampledFilter returns a [Reservoir] wrapping r that will only offer measurements
+// to r if the passed context associated with the measurement contains a sampled
+// [go.opentelemetry.io/otel/trace.SpanContext].
+func SampledFilter[N int64 | float64](r Reservoir[N]) Reservoir[N] {
+ return filtered[N]{Reservoir: r}
+}
+
+type filtered[N int64 | float64] struct {
+ Reservoir[N]
+}
+
+func (f filtered[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
+ if trace.SpanContextFromContext(ctx).IsSampled() {
+ f.Reservoir.Offer(ctx, t, n, a)
+ }
+}
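A test-style sketch, in-package: offers made without a sampled span in ctx never reach the wrapped reservoir:

ctx := context.Background() // no sampled span: this Offer is a no-op
res := SampledFilter(FixedSize[int64](4))
res.Offer(ctx, time.Now(), 42, nil)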
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go
new file mode 100644
index 000000000..6f4fe5524
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "sort"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Histogram returns a [Reservoir] that samples the last measurement that falls
+// within a histogram bucket. The histogram bucket upper-boundaries are defined
+// by bounds.
+//
+// The passed bounds will be sorted by this function.
+func Histogram[N int64 | float64](bounds []float64) Reservoir[N] {
+ sort.Float64s(bounds)
+ return &histRes[N]{
+ bounds: bounds,
+ storage: newStorage[N](len(bounds) + 1),
+ }
+}
+
+type histRes[N int64 | float64] struct {
+ *storage[N]
+
+ // bounds are bucket bounds in ascending order.
+ bounds []float64
+}
+
+func (r *histRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
+ r.store[sort.SearchFloat64s(r.bounds, float64(n))] = newMeasurement(ctx, t, n, a)
+}
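A test-style sketch: one exemplar slot per bucket, with the newest in-bucket measurement overwriting the previous one:

res := Histogram[float64]([]float64{0, 5, 10})
// 7 falls in the (5, 10] bucket: SearchFloat64s returns index 2, so the
// measurement is stored in (and may later overwrite) that slot.
res.Offer(context.Background(), time.Now(), 7, nil)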
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
new file mode 100644
index 000000000..7f9fda5b4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
@@ -0,0 +1,195 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "math"
+ "math/rand"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// rng is used to make sampling decisions.
+//
+// Do not use crypto/rand. There is no reason for the decrease in performance
+// given this is not a security sensitive decision.
+var rng = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+// random returns, as a float64, a uniform pseudo-random number in the open
+// interval (0.0,1.0).
+func random() float64 {
+ // TODO: This does not return a uniform number. rng.Float64 returns a
+ // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it
+ // returns multiples of 2^-53, and not all floating point numbers between 0
+ // and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand
+ // are always going to be 0).
+ //
+ // An alternative algorithm should be considered that will actually return
+ // a uniform number in the interval (0,1). For example, since the default
+ // rand source provides a uniform distribution for Int63, this can be
+ // converted following the prototypical code of Mersenne Twister 64 (Takuji
+ // Nishimura and Makoto Matsumoto:
+ // http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c)
+ //
+ // (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0)
+ //
+ // There are likely many other methods to explore here as well.
+
+ f := rng.Float64()
+ for f == 0 {
+ f = rng.Float64()
+ }
+ return f
+}
+
+// FixedSize returns a [Reservoir] that samples at most k exemplars. If there
+// are k or fewer measurements made, the Reservoir will sample each one. If
+// there are more than k, the Reservoir will then randomly sample all
+// additional measurements with a decreasing probability.
+func FixedSize[N int64 | float64](k int) Reservoir[N] {
+ r := &randRes[N]{storage: newStorage[N](k)}
+ r.reset()
+ return r
+}
+
+type randRes[N int64 | float64] struct {
+ *storage[N]
+
+ // count is the number of measurements seen.
+ count int64
+ // next is the next count that will store a measurement at a random index
+ // once the reservoir has been filled.
+ next int64
+ // w is the largest random number in a distribution that is used to compute
+ // the next value of next.
+ w float64
+}
+
+func (r *randRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
+ // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December
+ // 1994). "Reservoir-Sampling Algorithms of Time Complexity
+ // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4):
+ // 481–493 (https://dl.acm.org/doi/10.1145/198429.198435).
+ //
+ // A high-level overview of "Algorithm L":
+ // 0) Pre-calculate the random count greater than the storage size when
+ // an exemplar will be replaced.
+ // 1) Accept all measurements offered until the configured storage size is
+ // reached.
+ // 2) Loop:
+ // a) When the pre-calculate count is reached, replace a random
+ // existing exemplar with the offered measurement.
+ // b) Calculate the next random count greater than the existing one
+ // which will replace another exemplar
+ //
+ // The way a "replacement" count is computed is by looking at `n` number of
+ // independent random numbers each corresponding to an offered measurement.
+ // Of these numbers, the smallest `k` (the same size as the storage
+ // capacity) are kept as a subset. The maximum value in this
+ // subset, called `w` is used to weight another random number generation
+ // for the next count that will be considered.
+ //
+ // By weighting the next count computation as described, it is able to
+ // perform a uniformly-weighted sampling algorithm based on the number of
+ // samples the reservoir has seen so far. The sampling will "slow down" as
+ // more and more samples are offered so as to reduce a bias towards those
+ // offered just prior to the end of the collection.
+ //
+ // This algorithm is preferred because of its balance of simplicity and
+ // performance. It will compute three random numbers (the bulk of
+ // computation time) for each item that becomes part of the reservoir, but
+ // it does not spend any time on items that do not. In particular it has an
+ // asymptotic runtime of O(k(1 + log(n/k))) where n is the number of
+ // measurements offered and k is the reservoir size.
+ //
+ // See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of
+ // this and other reservoir sampling algorithms. See
+ // https://github.com/MrAlias/reservoir-sampling for a performance
+ // comparison of reservoir sampling algorithms.
+
+ if int(r.count) < cap(r.store) {
+ r.store[r.count] = newMeasurement(ctx, t, n, a)
+ } else {
+ if r.count == r.next {
+ // Overwrite a random existing measurement with the one offered.
+ idx := int(rng.Int63n(int64(cap(r.store))))
+ r.store[idx] = newMeasurement(ctx, t, n, a)
+ r.advance()
+ }
+ }
+ r.count++
+}
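The two phases in a test-style sketch: the first cap(store) offers always land in storage, after which only the pre-computed counts trigger a random replacement, yielding a uniform sample overall:

r := FixedSize[int64](2)
for i := int64(0); i < 100; i++ {
	r.Offer(context.Background(), time.Now(), i, nil)
}
var dest []metricdata.Exemplar[int64]
r.Collect(&dest) // len(dest) == 2: a uniform sample of the 100 offers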
+
+// reset resets r to the initial state.
+func (r *randRes[N]) reset() {
+ // This resets the number of exemplars known.
+ r.count = 0
+ // Random index inserts should only happen after the storage is full.
+ r.next = int64(cap(r.store))
+
+ // Initial random number in the series used to generate r.next.
+ //
+ // This is set before r.advance to reset or initialize the random number
+ // series. Without doing so it would always be 0 or never restart a new
+ // random number series.
+ //
+ // This maps the uniform random number in (0,1) to a geometric distribution
+ // over the same interval. The mean of the distribution is inversely
+ // proportional to the storage capacity.
+ r.w = math.Exp(math.Log(random()) / float64(cap(r.store)))
+
+ r.advance()
+}
+
+// advance updates the count at which the offered measurement will overwrite an
+// existing exemplar.
+func (r *randRes[N]) advance() {
+ // Calculate the next value in the random number series.
+ //
+ // The current value of r.w is based on the max of a distribution of random
+ // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity
+ // of the storage and each `u` in the interval (0,w)). To calculate the
+ // next r.w we use the fact that when the next exemplar is selected to be
+ // included in the storage an existing one will be dropped, and the
+ // corresponding random number in the set used to calculate r.w will also
+ // be replaced. The replacement random number will also be within (0,w),
+ // therefore the next r.w will be based on the same distribution (i.e.
+ // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by
+ // computing the next random number `u` and take r.w as `w * u^(1/k)`.
+ r.w *= math.Exp(math.Log(random()) / float64(cap(r.store)))
+ // Use the new random number in the series to calculate the count of the
+ // next measurement that will be stored.
+ //
+ // Given 0 < r.w < 1, each iteration will result in subsequent r.w being
+ // smaller. This translates here into the next value of next being selected
+ // against a distribution with a higher mean (i.e. the expected value will
+ // increase and replacements become less likely).
+ //
+ // Important to note, the new r.next will always be at least 1 more than
+ // the last r.next.
+ r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1
+}
+
+func (r *randRes[N]) Collect(dest *[]metricdata.Exemplar[N]) {
+ r.storage.Collect(dest)
+ // Call reset here even though it will reset r.count and restart the random
+ // number series. This will persist any old exemplars as long as no new
+ // measurements are offered, but it will also prioritize those new
+ // measurements that are made over the older collection cycle ones.
+ r.reset()
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go
new file mode 100644
index 000000000..7d5276a34
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go
@@ -0,0 +1,44 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// Reservoir holds the sampled exemplar of measurements made.
+type Reservoir[N int64 | float64] interface {
+ // Offer accepts the parameters associated with a measurement. The
+ // parameters will be stored as an exemplar if the Reservoir decides to
+ // sample the measurement.
+ //
+ // The passed ctx needs to contain any baggage or span that were active
+ // when the measurement was made. This information may be used by the
+ // Reservoir in making a sampling decision.
+ //
+ // The time t is the time when the measurement was made. The val and attr
+ // parameters are the value and dropped (filtered) attributes of the
+ // measurement respectively.
+ Offer(ctx context.Context, t time.Time, val N, attr []attribute.KeyValue)
+
+ // Collect returns all the held exemplars.
+ //
+ // The Reservoir state is preserved after this call.
+ Collect(dest *[]metricdata.Exemplar[N])
+}
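To make the contract concrete, here is a deliberately naive in-package sketch that satisfies the interface by keeping only the most recent measurement. It reuses the `measurement` helper defined in storage.go below and is illustrative only, not one of the SDK's reservoirs:

```go
// lastValueRes is a hypothetical Reservoir sampling only the latest
// measurement offered to it.
type lastValueRes[N int64 | float64] struct {
	m measurement[N]
}

func (r *lastValueRes[N]) Offer(ctx context.Context, t time.Time, val N, attr []attribute.KeyValue) {
	r.m = newMeasurement(ctx, t, val, attr)
}

func (r *lastValueRes[N]) Collect(dest *[]metricdata.Exemplar[N]) {
	*dest = (*dest)[:0]
	if !r.m.valid {
		return
	}
	*dest = append(*dest, metricdata.Exemplar[N]{})
	r.m.Exemplar(&(*dest)[0])
}
```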
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go
new file mode 100644
index 000000000..e2c2b90a3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go
@@ -0,0 +1,107 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// storage is an exemplar storage for [Reservoir] implementations.
+type storage[N int64 | float64] struct {
+ // store are the measurements sampled.
+ //
+ // This does not use []metricdata.Exemplar because it potentially would
+ // require an allocation for trace and span IDs in the hot path of Offer.
+ store []measurement[N]
+}
+
+func newStorage[N int64 | float64](n int) *storage[N] {
+ return &storage[N]{store: make([]measurement[N], n)}
+}
+
+// Collect returns all the held exemplars.
+//
+// The Reservoir state is preserved after this call.
+func (r *storage[N]) Collect(dest *[]metricdata.Exemplar[N]) {
+ *dest = reset(*dest, len(r.store), len(r.store))
+ var n int
+ for _, m := range r.store {
+ if !m.valid {
+ continue
+ }
+
+ m.Exemplar(&(*dest)[n])
+ n++
+ }
+ *dest = (*dest)[:n]
+}
+
+// measurement is a measurement made by a telemetry system.
+type measurement[N int64 | float64] struct {
+ // FilteredAttributes are the attributes dropped during the measurement.
+ FilteredAttributes []attribute.KeyValue
+ // Time is the time when the measurement was made.
+ Time time.Time
+ // Value is the value of the measurement.
+ Value N
+ // SpanContext is the SpanContext active when a measurement was made.
+ SpanContext trace.SpanContext
+
+ valid bool
+}
+
+// newMeasurement returns a new non-empty Measurement.
+func newMeasurement[N int64 | float64](ctx context.Context, ts time.Time, v N, droppedAttr []attribute.KeyValue) measurement[N] {
+ return measurement[N]{
+ FilteredAttributes: droppedAttr,
+ Time: ts,
+ Value: v,
+ SpanContext: trace.SpanContextFromContext(ctx),
+ valid: true,
+ }
+}
+
+// Exemplar returns m as a [metricdata.Exemplar].
+func (m measurement[N]) Exemplar(dest *metricdata.Exemplar[N]) {
+ dest.FilteredAttributes = m.FilteredAttributes
+ dest.Time = m.Time
+ dest.Value = m.Value
+
+ if m.SpanContext.HasTraceID() {
+ traceID := m.SpanContext.TraceID()
+ dest.TraceID = traceID[:]
+ } else {
+ dest.TraceID = dest.TraceID[:0]
+ }
+
+ if m.SpanContext.HasSpanID() {
+ spanID := m.SpanContext.SpanID()
+ dest.SpanID = spanID[:]
+ } else {
+ dest.SpanID = dest.SpanID[:0]
+ }
+}
+
+func reset[T any](s []T, length, capacity int) []T {
+ if cap(s) < capacity {
+ return make([]T, length, capacity)
+ }
+ return s[:length]
+}
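Collect takes a `*[]metricdata.Exemplar[N]` rather than returning a fresh slice precisely so `reset` can reuse the destination's backing array across collection cycles. A sketch of the intended call pattern, where `export` is a hypothetical sink:

```go
func drain(r Reservoir[int64], export func([]metricdata.Exemplar[int64])) {
	var dest []metricdata.Exemplar[int64]
	for i := 0; i < 3; i++ {
		r.Collect(&dest) // reset() reallocates only when capacity is too small
		export(dest)     // consume before the next Collect overwrites it
	}
}
```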
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
new file mode 100644
index 000000000..aba69d654
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
@@ -0,0 +1,112 @@
+# Experimental Features
+
+The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Cardinality Limit](#cardinality-limit)
+- [Exemplars](#exemplars)
+
+### Cardinality Limit
+
+The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment variable.
+The value must be an integer.
+All other values are ignored.
+
+If the value set is less than or equal to `0`, no limit will be applied.
+
+#### Examples
+
+Set the cardinality limit to 2000.
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=2000
+```
+
+Set an infinite cardinality limit (functionally equivalent to disabling the feature).
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=-1
+```
+
+Disable the cardinality limit.
+
+```console
+unset OTEL_GO_X_CARDINALITY_LIMIT
+```
+
+### Exemplars
+
+A sample of measurements made may be exported directly as a set of exemplars.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable.
+The value must be the case-insensitive string `"true"` to enable the feature.
+All other values are ignored.
+
+Exemplar filters are also supported.
+The exemplar filter applies to all measurements made.
+It filters these measurements, only allowing certain ones to be passed to the underlying exemplar reservoir.
+
+To change the exemplar filter from the default `"trace_based"` filter, set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable.
+The value must be the case-sensitive string defined by the [OpenTelemetry specification].
+
+- `"always_on"`: allows all measurements
+- `"always_off"`: denies all measurements
+- `"trace_based"`: allows only sampled measurements
+
+All values other than these will result in the default, `"trace_based"`, exemplar filter being used.
+
+[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar
+
+#### Examples
+
+Enable exemplars to be exported.
+
+```console
+export OTEL_GO_X_EXEMPLAR=true
+```
+
+Disable exemplars from being exported.
+
+```console
+unset OTEL_GO_X_EXEMPLAR
+```
+
+Set the exemplar filter to allow all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_on
+```
+
+Set the exemplar filter to deny all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_off
+```
+
+Set the exemplar filter to only allow sampled measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=trace_based
+```
+
+Revert to the default exemplar filter (`"trace_based"`).
+
+```console
+unset OTEL_METRICS_EXEMPLAR_FILTER
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
new file mode 100644
index 000000000..541160f94
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package x contains support for OTel metric SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
+
+import (
+ "os"
+ "strconv"
+ "strings"
+)
+
+var (
+ // Exemplars is an experimental feature flag that defines if exemplars
+ // should be recorded for metric data-points.
+ //
+ // To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable
+ // to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+ // will also enable this).
+ Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) {
+ if strings.ToLower(v) == "true" {
+ return v, true
+ }
+ return "", false
+ })
+
+ // CardinalityLimit is an experimental feature flag that defines if
+ // cardinality limits should be applied to the recorded metric data-points.
+ //
+ // To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
+ // variable to the integer limit value you want to use.
+ //
+ // Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0
+ // will disable the cardinality limits.
+ CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
+ n, err := strconv.Atoi(v)
+ if err != nil {
+ return 0, false
+ }
+ return n, true
+ })
+)
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+ key string
+ parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+ const envKeyRoot = "OTEL_GO_X_"
+ return Feature[T]{
+ key: envKeyRoot + suffix,
+ parse: parse,
+ }
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+ //
+ // > The SDK MUST interpret an empty value of an environment variable the
+ // > same way as when the variable is unset.
+ vRaw := os.Getenv(f.key)
+ if vRaw == "" {
+ return v, ok
+ }
+ return f.parse(vRaw)
+}
+
+// Enabled returns if the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+ _, ok := f.Lookup()
+ return ok
+}
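For illustration, a hypothetical flag built with the same helper would be declared and read as below. The `MAX_STREAMS` name is invented here; only `EXEMPLAR` and `CARDINALITY_LIMIT` actually exist in this package:

```go
// maxStreams would read OTEL_GO_X_MAX_STREAMS and accept positive integers.
var maxStreams = newFeature("MAX_STREAMS", func(v string) (int, bool) {
	n, err := strconv.Atoi(v)
	return n, err == nil && n > 0
})

func example() {
	// With OTEL_GO_X_MAX_STREAMS=500 set in the environment:
	if n, ok := maxStreams.Lookup(); ok {
		_ = n // 500; empty or unparsable values leave ok == false
	}
	_ = maxStreams.Enabled() // true only when the value parses
}
```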
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
index 7f51ec512..beb7876ec 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
@@ -23,6 +23,7 @@ import (
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/embedded"
"go.opentelemetry.io/otel/sdk/instrumentation"
+
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
)
@@ -40,6 +41,11 @@ type meter struct {
scope instrumentation.Scope
pipes pipelines
+ int64Insts *cacheWithErr[instID, *int64Inst]
+ float64Insts *cacheWithErr[instID, *float64Inst]
+ int64ObservableInsts *cacheWithErr[instID, int64Observable]
+ float64ObservableInsts *cacheWithErr[instID, float64Observable]
+
int64Resolver resolver[int64]
float64Resolver resolver[float64]
}
@@ -49,11 +55,20 @@ func newMeter(s instrumentation.Scope, p pipelines) *meter {
// meter is asked to create are logged to the user.
var viewCache cache[string, instID]
+ var int64Insts cacheWithErr[instID, *int64Inst]
+ var float64Insts cacheWithErr[instID, *float64Inst]
+ var int64ObservableInsts cacheWithErr[instID, int64Observable]
+ var float64ObservableInsts cacheWithErr[instID, float64Observable]
+
return &meter{
- scope: s,
- pipes: p,
- int64Resolver: newResolver[int64](p, &viewCache),
- float64Resolver: newResolver[float64](p, &viewCache),
+ scope: s,
+ pipes: p,
+ int64Insts: &int64Insts,
+ float64Insts: &float64Insts,
+ int64ObservableInsts: &int64ObservableInsts,
+ float64ObservableInsts: &float64ObservableInsts,
+ int64Resolver: newResolver[int64](p, &viewCache),
+ float64Resolver: newResolver[float64](p, &viewCache),
}
}
@@ -104,20 +119,62 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti
return i, validateInstrumentName(name)
}
+// int64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
+func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) {
+ key := instID{
+ Name: id.Name,
+ Description: id.Description,
+ Unit: id.Unit,
+ Kind: id.Kind,
+ }
+ if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+ warnRepeatedObservableCallbacks(id)
+ }
+ return m.int64ObservableInsts.Lookup(key, func() (int64Observable, error) {
+ inst := newInt64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+ for _, insert := range m.int64Resolver.inserters {
+ // Connect the measure functions for instruments in this pipeline with the
+ // callbacks for this pipeline.
+ in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+ if err != nil {
+ return inst, err
+ }
+ // Drop aggregation
+ if len(in) == 0 {
+ inst.dropAggregation = true
+ continue
+ }
+ inst.appendMeasures(in)
+ for _, cback := range callbacks {
+ inst := int64Observer{measures: in}
+ fn := cback
+ insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+ }
+ }
+ return inst, validateInstrumentName(id.Name)
+ })
+}
+
// Int64ObservableCounter returns a new instrument identified by name and
// configured with options. The instrument is used to asynchronously record
// increasing int64 measurements once per measurement collection cycle.
// Only the measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
cfg := metric.NewInt64ObservableCounterConfig(options...)
- const kind = InstrumentKindObservableCounter
- p := int64ObservProvider{m}
- inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
- if err != nil {
- return nil, err
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableCounter,
+ Scope: m.scope,
}
- p.registerCallbacks(inst, cfg.Callbacks())
- return inst, validateInstrumentName(name)
+ return m.int64ObservableInstrument(id, cfg.Callbacks())
}
// Int64ObservableUpDownCounter returns a new instrument identified by name and
@@ -126,14 +183,14 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser
// measurements recorded during the collection cycle are exported.
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
- const kind = InstrumentKindObservableUpDownCounter
- p := int64ObservProvider{m}
- inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
- if err != nil {
- return nil, err
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableUpDownCounter,
+ Scope: m.scope,
}
- p.registerCallbacks(inst, cfg.Callbacks())
- return inst, validateInstrumentName(name)
+ return m.int64ObservableInstrument(id, cfg.Callbacks())
}
// Int64ObservableGauge returns a new instrument identified by name and
@@ -142,14 +199,14 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6
// Only the measurements recorded during the collection cycle are exported.
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
cfg := metric.NewInt64ObservableGaugeConfig(options...)
- const kind = InstrumentKindObservableGauge
- p := int64ObservProvider{m}
- inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
- if err != nil {
- return nil, err
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableGauge,
+ Scope: m.scope,
}
- p.registerCallbacks(inst, cfg.Callbacks())
- return inst, validateInstrumentName(name)
+ return m.int64ObservableInstrument(id, cfg.Callbacks())
}
// Float64Counter returns a new instrument identified by name and configured
@@ -196,20 +253,62 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram
return i, validateInstrumentName(name)
}
+// float64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
+func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) {
+ key := instID{
+ Name: id.Name,
+ Description: id.Description,
+ Unit: id.Unit,
+ Kind: id.Kind,
+ }
+ if m.float64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+ warnRepeatedObservableCallbacks(id)
+ }
+ return m.float64ObservableInsts.Lookup(key, func() (float64Observable, error) {
+ inst := newFloat64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+ for _, insert := range m.float64Resolver.inserters {
+ // Connect the measure functions for instruments in this pipeline with the
+ // callbacks for this pipeline.
+ in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+ if err != nil {
+ return inst, err
+ }
+ // Drop aggregation
+ if len(in) == 0 {
+ inst.dropAggregation = true
+ continue
+ }
+ inst.appendMeasures(in)
+ for _, cback := range callbacks {
+ inst := float64Observer{measures: in}
+ fn := cback
+ insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+ }
+ }
+ return inst, validateInstrumentName(id.Name)
+ })
+}
+
// Float64ObservableCounter returns a new instrument identified by name and
// configured with options. The instrument is used to asynchronously record
// increasing float64 measurements once per measurement collection cycle.
// Only the measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
cfg := metric.NewFloat64ObservableCounterConfig(options...)
- const kind = InstrumentKindObservableCounter
- p := float64ObservProvider{m}
- inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
- if err != nil {
- return nil, err
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableCounter,
+ Scope: m.scope,
}
- p.registerCallbacks(inst, cfg.Callbacks())
- return inst, validateInstrumentName(name)
+ return m.float64ObservableInstrument(id, cfg.Callbacks())
}
// Float64ObservableUpDownCounter returns a new instrument identified by name
@@ -218,14 +317,14 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O
// measurements recorded during the collection cycle are exported.
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
- const kind = InstrumentKindObservableUpDownCounter
- p := float64ObservProvider{m}
- inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
- if err != nil {
- return nil, err
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableUpDownCounter,
+ Scope: m.scope,
}
- p.registerCallbacks(inst, cfg.Callbacks())
- return inst, validateInstrumentName(name)
+ return m.float64ObservableInstrument(id, cfg.Callbacks())
}
// Float64ObservableGauge returns a new instrument identified by name and
@@ -234,14 +333,14 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl
// Only the measurements recorded during the collection cycle are exported.
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
cfg := metric.NewFloat64ObservableGaugeConfig(options...)
- const kind = InstrumentKindObservableGauge
- p := float64ObservProvider{m}
- inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
- if err != nil {
- return nil, err
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableGauge,
+ Scope: m.scope,
}
- p.registerCallbacks(inst, cfg.Callbacks())
- return inst, validateInstrumentName(name)
+ return m.float64ObservableInstrument(id, cfg.Callbacks())
}
func validateInstrumentName(name string) error {
@@ -273,6 +372,16 @@ func isAlphanumeric(c rune) bool {
return isAlpha(c) || ('0' <= c && c <= '9')
}
+func warnRepeatedObservableCallbacks(id Instrument) {
+ inst := fmt.Sprintf(
+ "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
+ id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
+ )
+ global.Warn("Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.",
+ "instrument", inst,
+ )
+}
+
// RegisterCallback registers f to be called each collection cycle so it will
// make observations for insts during those cycles.
//
@@ -389,12 +498,14 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...
}
if _, registered := r.float64[oImpl.observablID]; !registered {
- global.Error(errUnregObserver, "failed to record",
- "name", oImpl.name,
- "description", oImpl.description,
- "unit", oImpl.unit,
- "number", fmt.Sprintf("%T", float64(0)),
- )
+ if !oImpl.dropAggregation {
+ global.Error(errUnregObserver, "failed to record",
+ "name", oImpl.name,
+ "description", oImpl.description,
+ "unit", oImpl.unit,
+ "number", fmt.Sprintf("%T", float64(0)),
+ )
+ }
return
}
c := metric.NewObserveConfig(opts)
@@ -422,12 +533,14 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric
}
if _, registered := r.int64[oImpl.observablID]; !registered {
- global.Error(errUnregObserver, "failed to record",
- "name", oImpl.name,
- "description", oImpl.description,
- "unit", oImpl.unit,
- "number", fmt.Sprintf("%T", int64(0)),
- )
+ if !oImpl.dropAggregation {
+ global.Error(errUnregObserver, "failed to record",
+ "name", oImpl.name,
+ "description", oImpl.description,
+ "unit", oImpl.unit,
+ "number", fmt.Sprintf("%T", int64(0)),
+ )
+ }
return
}
c := metric.NewObserveConfig(opts)
@@ -474,14 +587,28 @@ func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramC
// lookup returns the resolved instrumentImpl.
func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) {
- aggs, err := p.aggs(kind, name, desc, u)
- return &int64Inst{measures: aggs}, err
+ return p.meter.int64Insts.Lookup(instID{
+ Name: name,
+ Description: desc,
+ Unit: u,
+ Kind: kind,
+ }, func() (*int64Inst, error) {
+ aggs, err := p.aggs(kind, name, desc, u)
+ return &int64Inst{measures: aggs}, err
+ })
}
// lookupHistogram returns the resolved instrumentImpl.
func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) {
- aggs, err := p.histogramAggs(name, cfg)
- return &int64Inst{measures: aggs}, err
+ return p.meter.int64Insts.Lookup(instID{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindHistogram,
+ }, func() (*int64Inst, error) {
+ aggs, err := p.histogramAggs(name, cfg)
+ return &int64Inst{measures: aggs}, err
+ })
}
// float64InstProvider provides float64 OpenTelemetry instruments.
@@ -518,42 +645,33 @@ func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64Histog
// lookup returns the resolved instrumentImpl.
func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) {
- aggs, err := p.aggs(kind, name, desc, u)
- return &float64Inst{measures: aggs}, err
+ return p.meter.float64Insts.Lookup(instID{
+ Name: name,
+ Description: desc,
+ Unit: u,
+ Kind: kind,
+ }, func() (*float64Inst, error) {
+ aggs, err := p.aggs(kind, name, desc, u)
+ return &float64Inst{measures: aggs}, err
+ })
}
// lookupHistogram returns the resolved instrumentImpl.
func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) {
- aggs, err := p.histogramAggs(name, cfg)
- return &float64Inst{measures: aggs}, err
-}
-
-type int64ObservProvider struct{ *meter }
-
-func (p int64ObservProvider) lookup(kind InstrumentKind, name, desc, u string) (int64Observable, error) {
- aggs, err := (int64InstProvider)(p).aggs(kind, name, desc, u)
- return newInt64Observable(p.meter, kind, name, desc, u, aggs), err
-}
-
-func (p int64ObservProvider) registerCallbacks(inst int64Observable, cBacks []metric.Int64Callback) {
- if inst.observable == nil || len(inst.measures) == 0 {
- // Drop aggregator.
- return
- }
-
- for _, cBack := range cBacks {
- p.pipes.registerCallback(p.callback(inst, cBack))
- }
-}
-
-func (p int64ObservProvider) callback(i int64Observable, f metric.Int64Callback) func(context.Context) error {
- inst := int64Observer{int64Observable: i}
- return func(ctx context.Context) error { return f(ctx, inst) }
+ return p.meter.float64Insts.Lookup(instID{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindHistogram,
+ }, func() (*float64Inst, error) {
+ aggs, err := p.histogramAggs(name, cfg)
+ return &float64Inst{measures: aggs}, err
+ })
}
type int64Observer struct {
embedded.Int64Observer
- int64Observable
+ measures[int64]
}
func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) {
@@ -561,32 +679,9 @@ func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) {
o.observe(val, c.Attributes())
}
-type float64ObservProvider struct{ *meter }
-
-func (p float64ObservProvider) lookup(kind InstrumentKind, name, desc, u string) (float64Observable, error) {
- aggs, err := (float64InstProvider)(p).aggs(kind, name, desc, u)
- return newFloat64Observable(p.meter, kind, name, desc, u, aggs), err
-}
-
-func (p float64ObservProvider) registerCallbacks(inst float64Observable, cBacks []metric.Float64Callback) {
- if inst.observable == nil || len(inst.measures) == 0 {
- // Drop aggregator.
- return
- }
-
- for _, cBack := range cBacks {
- p.pipes.registerCallback(p.callback(inst, cBack))
- }
-}
-
-func (p float64ObservProvider) callback(i float64Observable, f metric.Float64Callback) func(context.Context) error {
- inst := float64Observer{float64Observable: i}
- return func(ctx context.Context) error { return f(ctx, inst) }
-}
-
type float64Observer struct {
embedded.Float64Observer
- float64Observable
+ measures[float64]
}
func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
index 995d42b38..32c17934f 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
@@ -15,6 +15,7 @@
package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
import (
+ "encoding/json"
"time"
"go.opentelemetry.io/otel/attribute"
@@ -211,6 +212,19 @@ type Extrema[N int64 | float64] struct {
valid bool
}
+// MarshalText converts the Extrema value to text.
+func (e Extrema[N]) MarshalText() ([]byte, error) {
+ if !e.valid {
+ return json.Marshal(nil)
+ }
+ return json.Marshal(e.value)
+}
+
+// MarshalJSON converts the Extrema value to JSON number.
+func (e *Extrema[N]) MarshalJSON() ([]byte, error) {
+ return e.MarshalText()
+}
+
// NewExtrema returns an Extrema set to v.
func NewExtrema[N int64 | float64](v N) Extrema[N] {
return Extrema[N]{value: v, valid: true}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
index 48abcc8a7..da39ab961 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
@@ -29,6 +29,7 @@ import (
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/metric/internal"
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+ "go.opentelemetry.io/otel/sdk/metric/internal/x"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
"go.opentelemetry.io/otel/sdk/resource"
)
@@ -93,14 +94,6 @@ func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) {
p.aggregations[scope] = append(p.aggregations[scope], iSync)
}
-// addCallback registers a single instrument callback to be run when
-// `produce()` is called.
-func (p *pipeline) addCallback(cback func(context.Context) error) {
- p.Lock()
- defer p.Unlock()
- p.callbacks = append(p.callbacks, cback)
-}
-
type multiCallback func(context.Context) error
// addMultiCallback registers a multi-instrument callback to be run when
@@ -281,6 +274,14 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation)
return measures, errs.errorOrNil()
}
+// addCallback registers a single instrument callback to be run when
+// `produce()` is called.
+func (i *inserter[N]) addCallback(cback func(context.Context) error) {
+ i.pipeline.Lock()
+ defer i.pipeline.Unlock()
+ i.pipeline.callbacks = append(i.pipeline.callbacks, cback)
+}
+
var aggIDCount uint64
// aggVal is the cached value in an aggregators cache.
@@ -358,9 +359,16 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum
normID := id.normalize()
cv := i.aggregators.Lookup(normID, func() aggVal[N] {
b := aggregate.Builder[N]{
- Temporality: i.pipeline.reader.temporality(kind),
+ Temporality: i.pipeline.reader.temporality(kind),
+ ReservoirFunc: reservoirFunc[N](stream.Aggregation),
}
b.Filter = stream.AttributeFilter
+ // A value less than or equal to zero will disable the aggregation
+ // limits for the builder (and all the created aggregates).
+ // CardinalityLimit.Lookup returns 0 by default if unset (or
+ // unrecognized input). Use that value directly.
+ b.AggregationLimit, _ = x.CardinalityLimit.Lookup()
+
in, out, err := i.aggregateFunc(b, stream.Aggregation, kind)
if err != nil {
return aggVal[N]{0, nil, err}
@@ -557,12 +565,6 @@ func newPipelines(res *resource.Resource, readers []Reader, views []View) pipeli
return pipes
}
-func (p pipelines) registerCallback(cback func(context.Context) error) {
- for _, pipe := range p {
- pipe.addCallback(cback)
- }
-}
-
func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration {
unregs := make([]func(), len(p))
for i, pipe := range p {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
index 4437747f2..310fa5a53 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
@@ -16,5 +16,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
// version is the current release version of the metric SDK in use.
func version() string {
- return "1.20.0"
+ return "1.24.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
index 4279013be..aed756c5e 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
@@ -41,8 +41,20 @@ type Detector interface {
// must never be done outside of a new major release.
}
-// Detect calls all input detectors sequentially and merges each result with the previous one.
-// It returns the merged error too.
+// Detect returns a new [Resource] merged from all the Resources each of the
+// detectors produces. Each of the detectors is called sequentially, in the
+// order they are passed, merging the produced resource into the previous.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if that error is returned from a detector. It may also
+// return a merge-conflicting Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from different detectors results
+// in a schema URL conflict. It is up to the caller to determine if this
+// returned Resource should be used or not.
+//
+// If one of the detectors returns an error that is not [ErrPartialResource],
+// the resource produced by the detector will not be merged and the returned
+// error will wrap that detector's error.
func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
r := new(Resource)
return r, detect(ctx, r, detectors)
@@ -50,6 +62,10 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
// detect runs all detectors using ctx and merges the result into res. This
// assumes res is allocated and not nil, it will panic otherwise.
+//
+// If the detectors or merging resources produce any errors (i.e.
+// [ErrPartialResource] or [ErrSchemaURLConflict]), a single error wrapping all of
+// these errors will be returned. Otherwise, nil is returned.
func detect(ctx context.Context, res *Resource, detectors []Detector) error {
var (
r *Resource
@@ -78,6 +94,11 @@ func detect(ctx context.Context, res *Resource, detectors []Detector) error {
if len(errs) == 0 {
return nil
}
+ if errors.Is(errs, ErrSchemaURLConflict) {
+ // If there has been a merge conflict, ensure the resource has no
+ // schema URL.
+ res.schemaURL = ""
+ }
return errs
}
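Callers are now expected to inspect the returned error and decide whether the partial or merge-conflicting Resource is still usable; a sketch, with `detA` and `detB` standing in for any two Detectors:

```go
res, err := resource.Detect(ctx, detA, detB)
switch {
case err == nil:
	// Fully detected.
case errors.Is(err, resource.ErrSchemaURLConflict):
	// res is merged but schemaless; often still usable.
case errors.Is(err, resource.ErrPartialResource):
	// res contains whatever could be detected.
default:
	// A detector failed outright; its result was not merged.
}
_ = res
```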
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
index c63a0dd1f..6a2c08293 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -22,7 +22,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
type (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
index 3d5362282..c1b47193f 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -22,14 +22,14 @@ import (
"os"
"regexp"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
type containerIDProvider func() (string, error)
var (
containerID containerIDProvider = getContainerIDFromCGroup
- cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`)
+ cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)
)
type cgroupContainerIDDetector struct{}
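The widened character class (`[-:]` instead of only `-`) lets the detector also extract container IDs whose prefix ends in a colon. A quick check with made-up cgroup lines:

```go
re := regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)
for _, line := range []string{
	"13:cpu:/docker/0123456789abcdef0123456789abcdef",        // plain path
	"0::/system.slice/cri-containerd-0123456789abcdef.scope", // '-' separator
	"0::/kubepods/besteffort/pod1/crio:0123456789abcdef",     // ':' separator (newly matched)
} {
	if m := re.FindStringSubmatch(line); m != nil {
		fmt.Println(m[1]) // the extracted container ID
	}
}
```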
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
index e29ae563a..be4cbe423 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -23,7 +23,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
const (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
index fb1ebf2ca..f579329c2 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -19,7 +19,7 @@ import (
"errors"
"strings"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
type hostIDProvider func() (string, error)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
index 0cbd55973..8fbf071c1 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -19,7 +19,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
type osDescriptionProvider func() (string, error)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
index ecdd11dd7..739ea4512 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -22,7 +22,7 @@ import (
"path/filepath"
"runtime"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
type (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
index 176ff1066..cb1ee0a9c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
@@ -17,6 +17,7 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource"
import (
"context"
"errors"
+ "fmt"
"sync"
"go.opentelemetry.io/otel"
@@ -40,9 +41,20 @@ var (
defaultResourceOnce sync.Once
)
-var errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL")
+// ErrSchemaURLConflict is an error returned when two Resources are merged
+// together that contain different, non-empty, schema URLs.
+var ErrSchemaURLConflict = errors.New("conflicting Schema URL")
-// New returns a Resource combined from the user-provided detectors.
+// New returns a [Resource] built using opts.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if options that provide a [Detector] are used and that
+// error is returned from one or more of the Detectors. It may also return a
+// merge-conflict Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from the opts results in a
+// schema URL conflict (see [Resource.Merge] for more information). It is up to
+// the caller to determine if this returned Resource should be used or not
+// based on these errors.
func New(ctx context.Context, opts ...Option) (*Resource, error) {
cfg := config{}
for _, opt := range opts {
@@ -98,7 +110,7 @@ func (r *Resource) String() string {
return r.attrs.Encoded(attribute.DefaultEncoder())
}
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+// MarshalLog is the marshaling function used by the logging system to represent this Resource.
func (r *Resource) MarshalLog() interface{} {
return struct {
Attributes attribute.Set
@@ -146,16 +158,29 @@ func (r *Resource) Equal(eq *Resource) bool {
return r.Equivalent() == eq.Equivalent()
}
-// Merge creates a new resource by combining resource a and b.
+// Merge creates a new [Resource] by merging a and b.
+//
+// If there are common keys between a and b, then the value from b will
+// overwrite the value from a, even if b's value is empty.
//
-// If there are common keys between resource a and b, then the value
-// from resource b will overwrite the value from resource a, even
-// if resource b's value is empty.
+// The SchemaURL of the resources will be merged according to the
+// [OpenTelemetry specification rules]:
//
-// The SchemaURL of the resources will be merged according to the spec rules:
-// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge
-// If the resources have different non-empty schemaURL an empty resource and an error
-// will be returned.
+// - If a's schema URL is empty then the returned Resource's schema URL will
+// be set to the schema URL of b,
+// - Else if b's schema URL is empty then the returned Resource's schema URL
+// will be set to the schema URL of a,
+// - Else if the schema URLs of a and b are the same then that will be the
+// schema URL of the returned Resource,
+// - Else this is a merging error. If the resources have different,
+// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will
+// be returned with the merged Resource. The merged Resource will have an
+// empty schema URL. It may be the case that some unintended attributes
+// have been overwritten or old semantic conventions persisted in the
+// returned Resource. It is up to the caller to determine if this returned
+// Resource should be used or not.
+//
+// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge
func Merge(a, b *Resource) (*Resource, error) {
if a == nil && b == nil {
return Empty(), nil
@@ -167,19 +192,6 @@ func Merge(a, b *Resource) (*Resource, error) {
return a, nil
}
- // Merge the schema URL.
- var schemaURL string
- switch true {
- case a.schemaURL == "":
- schemaURL = b.schemaURL
- case b.schemaURL == "":
- schemaURL = a.schemaURL
- case a.schemaURL == b.schemaURL:
- schemaURL = a.schemaURL
- default:
- return Empty(), errMergeConflictSchemaURL
- }
-
// Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key()
// Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...)
mi := attribute.NewMergeIterator(b.Set(), a.Set())
@@ -187,8 +199,23 @@ func Merge(a, b *Resource) (*Resource, error) {
for mi.Next() {
combine = append(combine, mi.Attribute())
}
- merged := NewWithAttributes(schemaURL, combine...)
- return merged, nil
+
+ switch {
+ case a.schemaURL == "":
+ return NewWithAttributes(b.schemaURL, combine...), nil
+ case b.schemaURL == "":
+ return NewWithAttributes(a.schemaURL, combine...), nil
+ case a.schemaURL == b.schemaURL:
+ return NewWithAttributes(a.schemaURL, combine...), nil
+ }
+ // Return the merged resource with an appropriate error. It is up to
+ // the user to decide if the returned resource can be used or not.
+ return NewSchemaless(combine...), fmt.Errorf(
+ "%w: %s and %s",
+ ErrSchemaURLConflict,
+ a.schemaURL,
+ b.schemaURL,
+ )
}
// Empty returns an instance of Resource with no attributes. It is
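In practice the new Merge contract looks like the following sketch (the schema URLs are arbitrary examples):

```go
a := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.21.0",
	attribute.String("service.name", "a"))
b := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.24.0",
	attribute.String("service.name", "b"))

merged, err := resource.Merge(a, b)
fmt.Println(errors.Is(err, resource.ErrSchemaURLConflict)) // true
fmt.Println(merged.SchemaURL() == "")                      // true: schemaless
fmt.Println(merged.Attributes())                           // b wins on common keys
```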
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
index c9c7effbf..fca26f2e7 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -406,7 +406,7 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan)
return false
}
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
func (bsp *batchSpanProcessor) MarshalLog() interface{} {
return struct {
Type string
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
index 7d46c4b48..b1ac60846 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -55,7 +55,7 @@ type tracerProviderConfig struct {
resource *resource.Resource
}
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+// MarshalLog is the marshaling function used by the logging system to represent this Provider.
func (cfg tracerProviderConfig) MarshalLog() interface{} {
return struct {
SpanProcessors []SpanProcessor
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
index 36dbf6776..85bc702a0 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -30,7 +30,7 @@ import (
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/internal"
"go.opentelemetry.io/otel/sdk/resource"
- semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -208,6 +208,16 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) {
s.status = status
}
+// ensureAttributesCapacity inlines functionality from slices.Grow
+// so that we can avoid needing to import golang.org/x/exp for go1.20.
+// Once support for go1.20 is dropped, we can use slices.Grow available since go1.21 instead.
+// Tracking issue: https://github.com/open-telemetry/opentelemetry-go/issues/4819.
+func (s *recordingSpan) ensureAttributesCapacity(minCapacity int) {
+ if n := minCapacity - cap(s.attributes); n > 0 {
+ s.attributes = append(s.attributes[:cap(s.attributes)], make([]attribute.KeyValue, n)...)[:len(s.attributes)]
+ }
+}
+
// SetAttributes sets attributes of this span.
//
// If a key from attributes already exists the value associated with that key
@@ -242,6 +252,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
// Otherwise, add without deduplication. When attributes are read they
// will be deduplicated, optimizing the operation.
+ s.ensureAttributesCapacity(len(s.attributes) + len(attributes))
for _, a := range attributes {
if !a.Valid() {
// Drop all invalid attributes.
@@ -277,6 +288,12 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
// Now that s.attributes is deduplicated, adding unique attributes up to
// the capacity of s will not over allocate s.attributes.
+ if sum := len(attrs) + len(s.attributes); sum < limit {
+ // After support for go1.20 is dropped, simplify if-else to min(sum, limit).
+ s.ensureAttributesCapacity(sum)
+ } else {
+ s.ensureAttributesCapacity(limit)
+ }
for _, a := range attrs {
if !a.Valid() {
// Drop all invalid attributes.
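The append expression above is the standard in-place capacity-growth idiom; isolated, for any element type:

```go
// ensureCap grows s to capacity >= minCap while preserving its length and
// contents; len(s) on the right-hand side is evaluated before reassignment.
func ensureCap[T any](s []T, minCap int) []T {
	if n := minCap - cap(s); n > 0 {
		s = append(s[:cap(s)], make([]T, n)...)[:len(s)]
	}
	return s
}

// ensureCap(make([]int, 2, 2), 8) returns a slice with len 2 and cap >= 8
// after a single allocation.
```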
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
index 7048c788e..42de0b9a7 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -16,5 +16,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
- return "1.20.0"
+ return "1.24.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
index 7cf424855..0318b5ec4 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
@@ -15,6 +15,6 @@
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the v1.21.0 version of the OpenTelemetry specification.
+// patterns for OpenTelemetry things. This package represents the v1.21.0
+// version of the OpenTelemetry semantic conventions.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
new file mode 100644
index 000000000..31726598d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
@@ -0,0 +1,4398 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes FaaS attributes.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: experimental
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It identifies the class / type of event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'browser.mouse.click', 'device.app.lifecycle'
+ // Note: Event names are subject to the same rules as [attribute
+ // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md).
+ // Notably, event names are namespaced to avoid collisions and provide a
+ // clean separation of semantics for events in separate domains like
+ // browser, mobile, and kubernetes.
+ EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Describes Log attributes
+const (
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream"
+ // semantic conventions. It represents the stream associated with the log.
+ // See below for a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+ // Logs from stdout stream
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// A file to which log was emitted.
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'audit.log'
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the
+ // basename of the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'uuid.log'
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/log/mysql/audit.log'
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full
+ // path to the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/lib/docker/uuid.log'
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
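+
+// A minimal usage sketch (illustrative only): composing the log.file.*
+// attributes into a set, e.g. for annotating an exported log record:
+//
+//	attrs := attribute.NewSet(
+//		LogFileName("audit.log"),
+//		LogFilePath("/var/log/mysql/audit.log"),
+//	)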
+
+// Describes Database attributes
+const (
+ // PoolNameKey is the attribute Key conforming to the "pool.name" semantic
+ // conventions. It represents the name of the connection pool; unique
+ // within the instrumented application. If the connection pool
+ // implementation doesn't provide a name, the
+ // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
+ // should be used.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'myDataSource'
+ PoolNameKey = attribute.Key("pool.name")
+
+ // StateKey is the attribute Key conforming to the "state" semantic
+ // conventions. It represents the state of a connection in the pool
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'idle'
+ StateKey = attribute.Key("state")
+)
+
+var (
+ // idle
+ StateIdle = StateKey.String("idle")
+ // used
+ StateUsed = StateKey.String("used")
+)
+
+// PoolName returns an attribute KeyValue conforming to the "pool.name"
+// semantic conventions. It represents the name of the connection pool; unique
+// within the instrumented application. If the connection pool
+// implementation doesn't provide a name, the
+// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
+// should be used.
+func PoolName(val string) attribute.KeyValue {
+ return PoolNameKey.String(val)
+}
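+
+// A minimal usage sketch (illustrative only): labelling a connection-pool
+// measurement with pool.name and state, assuming a hypothetical
+// Int64Counter (usedConns) obtained from go.opentelemetry.io/otel/metric
+// elsewhere:
+//
+//	usedConns.Add(ctx, 1, metric.WithAttributes(
+//		PoolName("myDataSource"),
+//		StateUsed,
+//	))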
+
+// ASP.NET Core attributes
+const (
+ // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+ // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+ // represents the full type name of the
+ // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+ // implementation that handled the exception.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if and only if the exception
+ // was handled by this handler.)
+ // Stability: experimental
+ // Examples: 'Contoso.MyHandler'
+ AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+ // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+ // the rate limiting policy name.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if the matched endpoint for the
+ // request had a rate-limiting policy.)
+ // Stability: experimental
+ // Examples: 'fixed', 'sliding', 'token'
+ AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+ // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.result" semantic conventions. It represents
+ // the rate-limiting result; it shows whether the lease was acquired or
+ // contains a rejection reason.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'acquired', 'request_canceled'
+ AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+ // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+ // "aspnetcore.request.is_unhandled" semantic conventions. It represents
+ // the flag indicating whether the request was handled by the application
+ // pipeline.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (if and only if the request was
+ // not handled.)
+ // Stability: experimental
+ // Examples: True
+ AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+ // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+ // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+ // value that indicates whether the matched route is a fallback route.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If and only if a route was
+ // successfully matched.)
+ // Stability: experimental
+ // Examples: True
+ AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+)
+
+var (
+ // Lease was acquired
+ AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
+ // Lease request was rejected by the endpoint limiter
+ AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
+ // Lease request was rejected by the global limiter
+ AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
+ // Lease request was canceled
+ AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
+)
+
+// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
+// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+// represents the full type name of the
+// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+// implementation that handled the exception.
+func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
+ return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
+}
+
+// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
+// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+// the rate limiting policy name.
+func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
+ return AspnetcoreRateLimitingPolicyKey.String(val)
+}
+
+// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
+// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
+// the flag indicating if request was handled by the application pipeline.
+func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
+ return AspnetcoreRequestIsUnhandledKey.Bool(val)
+}
+
+// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
+// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+// value that indicates whether the matched route is a fallback route.
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
+ return AspnetcoreRoutingIsFallbackKey.Bool(val)
+}
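+
+// A minimal usage sketch (illustrative only): recording a rate-limiting
+// outcome on a span, assuming a span obtained from
+// go.opentelemetry.io/otel/trace elsewhere:
+//
+//	span.SetAttributes(
+//		AspnetcoreRateLimitingPolicy("fixed"),
+//		AspnetcoreRateLimitingResultAcquired,
+//	)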
+
+// SignalR attributes
+const (
+ // SignalrConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the
+ // SignalR HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'app_shutdown', 'timeout'
+ SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalrTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the [SignalR
+ // transport
+ // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'web_sockets', 'long_polling'
+ SignalrTransportKey = attribute.Key("signalr.transport")
+)
+
+var (
+ // The connection was closed normally
+ SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout
+ SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down
+ SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
+)
+
+var (
+ // ServerSentEvents protocol
+ SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
+ // WebSockets protocol
+ SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
+)
+
+// Describes JVM buffer metric attributes.
+const (
+ // JvmBufferPoolNameKey is the attribute Key conforming to the
+ // "jvm.buffer.pool.name" semantic conventions. It represents the name of
+ // the buffer pool.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'mapped', 'direct'
+ // Note: Pool names are generally obtained via
+ // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+ JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+ return JvmBufferPoolNameKey.String(val)
+}
+
+// Describes JVM memory metric attributes.
+const (
+ // JvmMemoryPoolNameKey is the attribute Key conforming to the
+ // "jvm.memory.pool.name" semantic conventions. It represents the name of
+ // the memory pool.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+ // Note: Pool names are generally obtained via
+ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+ JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+ // JvmMemoryTypeKey is the attribute Key conforming to the
+ // "jvm.memory.type" semantic conventions. It represents the type of
+ // memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+)
+
+var (
+ // Heap memory
+ JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+ // Non-heap memory
+ JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+ return JvmMemoryPoolNameKey.String(val)
+}
+
+// Describes System metric attributes
+const (
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '(identifier)'
+ SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU metric attributes
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It represents the
+ // logical CPU number [0..n-1]
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemCPUStateKey is the attribute Key conforming to the
+ // "system.cpu.state" semantic conventions. It represents the state of the
+ // CPU
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle', 'interrupt'
+ SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+ // user
+ SystemCPUStateUser = SystemCPUStateKey.String("user")
+ // system
+ SystemCPUStateSystem = SystemCPUStateKey.String("system")
+ // nice
+ SystemCPUStateNice = SystemCPUStateKey.String("nice")
+ // idle
+ SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+ // iowait
+ SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+ // interrupt
+ SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+ // steal
+ SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1]
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
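+
+// A minimal usage sketch (illustrative only): attributing CPU time to a
+// logical CPU and state, assuming a hypothetical Float64Counter (cpuTime)
+// obtained from go.opentelemetry.io/otel/metric elsewhere:
+//
+//	cpuTime.Add(ctx, seconds, metric.WithAttributes(
+//		SystemCPULogicalNumber(0),
+//		SystemCPUStateIdle,
+//	))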
+
+// Describes System Memory metric attributes
+const (
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory
+ // state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free', 'cached'
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+)
+
+var (
+ // used
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // shared
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+ // buffers
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Describes System Memory Paging metric attributes
+const (
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'in'
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory
+ // paging state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free'
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory
+ // paging type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'minor'
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+)
+
+var (
+ // in
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+var (
+ // used
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+var (
+ // major
+ SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+ // minor
+ SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Describes Filesystem metric attributes
+const (
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the
+ // filesystem mode
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'rw, ro'
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/mnt/data'
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the
+ // filesystem state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'used'
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the
+ // filesystem type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ext4'
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+)
+
+var (
+ // used
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+var (
+ // fat32
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
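+
+// A minimal usage sketch (illustrative only): attributing filesystem usage,
+// assuming a hypothetical Int64UpDownCounter (fsUsage) obtained from
+// go.opentelemetry.io/otel/metric elsewhere:
+//
+//	fsUsage.Add(ctx, deltaBytes, metric.WithAttributes(
+//		SystemFilesystemMountpoint("/mnt/data"),
+//		SystemFilesystemTypeExt4,
+//		SystemFilesystemStateUsed,
+//	))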
+
+// Describes Network metric attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+ // "system.network.state" semantic conventions. It represents a stateless
+ // protocol MUST NOT set this attribute
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process metric attributes
+const (
+ // SystemProcessesStatusKey is the attribute Key conforming to the
+ // "system.processes.status" semantic conventions. It represents the
+ // process state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessesStatusKey = attribute.Key("system.processes.status")
+)
+
+var (
+ // running
+ SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running")
+ // sleeping
+ SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping")
+ // stopped
+ SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped")
+ // defunct
+ SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct")
+)
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.address` SHOULD represent the client address
+ // behind any intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port"
+ // semantic conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.port` SHOULD represent the client port behind
+ // any intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
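+
+// A minimal usage sketch (illustrative only): recording the observed client
+// on a server-side span, assuming a span obtained from
+// go.opentelemetry.io/otel/trace elsewhere:
+//
+//	span.SetAttributes(
+//		ClientAddress("10.1.2.80"),
+//		ClientPort(65123),
+//	)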
+
+// The attributes used to describe telemetry in the context of databases.
+const (
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary Cassandra table that the operation is acting upon, including the
+ // keyspace name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references Cassandra
+ // rather than SQL. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // Cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBContainerKey is the attribute Key conforming to the
+ // "db.cosmosdb.container" semantic conventions. It represents the cosmos
+ // DB container name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'anystring'
+ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // Cosmos DB operation type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+ // consumed for that operation
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // Cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+ // DBElasticsearchClusterNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.cluster.name" semantic conventions. It represents the
+ // identifier of an Elasticsearch cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+ DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+ // DBElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.node.name" semantic conventions. It represents the
+ // human-readable identifier of the node/instance to which a
+ // request was routed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-0000000001'
+ DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+
+ // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id"
+ // semantic conventions. It represents an identifier (address, unique name,
+ // or any other identifier) of the database instance that is executing
+ // queries or mutations on the current connection. This is useful in cases
+ // where the database is running in a clustered environment and the
+ // instrumentation is able to record the node executing the query. The
+ // client may obtain this value in databases like MySQL using queries like
+ // `select @@hostname`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mysql-e26b99z.example.com'
+ DBInstanceIDKey = attribute.Key("db.instance.id")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the MongoDB
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // being connected to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
+ // required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database,
+ // this should be set to the target database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary Cassandra table that the operation is acting upon, including the
+// keyspace name (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+ return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+ return DBElasticsearchNodeNameKey.String(val)
+}
+
+// DBInstanceID returns an attribute KeyValue conforming to the
+// "db.instance.id" semantic conventions. It represents an identifier (address,
+// unique name, or any other identifier) of the database instance that is
+// executing queries or mutations on the current connection. This is useful in
+// cases where the database is running in a clustered environment and the
+// instrumentation is able to record the node executing the query. The client
+// may obtain this value in databases like MySQL using queries like `select
+// @@hostname`.
+func DBInstanceID(val string) attribute.KeyValue {
+ return DBInstanceIDKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the MongoDB
+// collection being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this
+// should be set to the target database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
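+
+// A minimal usage sketch (illustrative only): annotating a database client
+// span with common db.* attributes, assuming a span obtained from
+// go.opentelemetry.io/otel/trace elsewhere:
+//
+//	span.SetAttributes(
+//		DBSystemPostgreSQL,
+//		DBName("customers"),
+//		DBOperation("SELECT"),
+//		DBSQLTable("public.users"),
+//	)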
+
+// Describes deprecated HTTP attributes.
+const (
+ // HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+ // semantic conventions.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Deprecated: use `http.request.method` instead.
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 3495
+ // Deprecated: use `http.request.header.content-length` instead.
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 3495
+ // Deprecated: use `http.response.header.content-length` instead.
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'http', 'https'
+ // Deprecated: use `url.scheme` instead.
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 200
+ // Deprecated: use `http.response.status_code` instead.
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/search?q=OpenTelemetry#SemConv'
+ // Deprecated: use `url.path` and `url.query` instead.
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Deprecated: use `url.full` instead.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPUserAgentKey is the attribute Key conforming to the
+ // "http.user_agent" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1'
+ // Deprecated: use `user_agent.original` instead.
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+)
+
+var (
+ // HTTP/1.0
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+ // HTTP/1.1
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+ // HTTP/2
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+ // HTTP/3
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+ // SPDY protocol
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+ // QUIC protocol
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions.
+//
+// Deprecated: use `http.request.method` instead.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions.
+//
+// Deprecated: use `http.request.header.content-length` instead.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions.
+//
+// Deprecated: use `http.response.header.content-length` instead.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions.
+//
+// Deprecated: use `url.scheme` instead.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions.
+//
+// Deprecated: use `http.response.status_code` instead.
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions.
+//
+// Deprecated: use `url.path` and `url.query` instead.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions.
+//
+// Deprecated: use `url.full` instead.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions.
+//
+// Deprecated: use `user_agent.original` instead.
+func HTTPUserAgent(val string) attribute.KeyValue {
+ return HTTPUserAgentKey.String(val)
+}
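+
+// Editor's note: a minimal migration sketch (not part of the upstream
+// semconv file) showing how a caller of the deprecated HTTPURL helper above
+// might move to the `url.full` replacement named in its deprecation notice.
+// The replacement key is built directly with attribute.Key rather than
+// assuming a helper for it exists in this package.
+func exampleMigrateHTTPURL(full string) (deprecated, replacement attribute.KeyValue) {
+ deprecated = HTTPURL(full) // emits the deprecated http.url attribute
+ replacement = attribute.Key("url.full").String(full)
+ return deprecated, replacement
+}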
+
+// These attributes may be used for any network related operation.
+const (
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'example.com'
+ // Deprecated: use `server.address`.
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ // Deprecated: use `server.port`.
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'example.com'
+ // Deprecated: use `server.address` on client spans and `client.address` on
+ // server spans.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ // Deprecated: use `server.port` on client spans and `client.port` on
+ // server spans.
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetProtocolNameKey is the attribute Key conforming to the
+ // "net.protocol.name" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'amqp', 'http', 'mqtt'
+ // Deprecated: use `network.protocol.name`.
+ NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+ // NetProtocolVersionKey is the attribute Key conforming to the
+ // "net.protocol.version" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '3.1.1'
+ // Deprecated: use `network.protocol.version`.
+ NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Deprecated: use `network.transport` and `network.type`.
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/var/my.sock'
+ // Deprecated: use `network.local.address`.
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ // Deprecated: use `network.local.port`.
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '192.168.0.1'
+ // Deprecated: use `network.peer.address`.
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/var/my.sock'
+ // Deprecated: no replacement at this time.
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 65531
+ // Deprecated: use `network.peer.port`.
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Deprecated: use `network.transport`.
+ NetTransportKey = attribute.Key("net.transport")
+)
+
+var (
+ // IPv4 address
+ //
+ // Deprecated: use `network.transport` and `network.type`.
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ //
+ // Deprecated: use `network.transport` and `network.type`.
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ //
+ // Deprecated: use `network.transport` and `network.type`.
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+var (
+ // ip_tcp
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions.
+//
+// Deprecated: use `server.address`.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions.
+//
+// Deprecated: use `server.port`.
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions.
+//
+// Deprecated: use `server.address` on client spans and `client.address` on
+// server spans.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions.
+//
+// Deprecated: use `server.port` on client spans and `client.port` on server
+// spans.
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions.
+//
+// Deprecated: use `network.protocol.name`.
+func NetProtocolName(val string) attribute.KeyValue {
+ return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions.
+//
+// Deprecated: use `network.protocol.version`.
+func NetProtocolVersion(val string) attribute.KeyValue {
+ return NetProtocolVersionKey.String(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions.
+//
+// Deprecated: use `network.local.address`.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions.
+//
+// Deprecated: use `network.local.port`.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions.
+//
+// Deprecated: use `network.peer.address`.
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions.
+//
+// Deprecated: no replacement at this time.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions.
+//
+// Deprecated: use `network.peer.port`.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
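+
+// Editor's note: a minimal sketch (not part of the upstream file) of the
+// client-span replacements for the deprecated net.peer.* helpers above,
+// following their deprecation notes (`server.address` and `server.port` on
+// client spans). The replacement keys are built directly since the
+// corresponding helpers may live elsewhere.
+func exampleMigrateNetPeer(host string, port int) (deprecated, replacement []attribute.KeyValue) {
+ deprecated = []attribute.KeyValue{NetPeerName(host), NetPeerPort(port)}
+ replacement = []attribute.KeyValue{
+  attribute.Key("server.address").String(host),
+  attribute.Key("server.port").Int(port),
+ }
+ return deprecated, replacement
+}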
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the
+ // destination address - domain name if available without reverse DNS
+ // lookup; otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the source side, and when communicating through
+ // an intermediary, `destination.address` SHOULD represent the destination
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the
+ // "destination.port" semantic conventions. It represents the destination
+ // port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number.
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
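+
+// Editor's note: an illustrative sketch (not part of the upstream file) of
+// the destination attributes above describing one side of a peer-to-peer
+// exchange; the values are hypothetical.
+func exampleDestinationAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  DestinationAddress("destination.example.com"),
+  DestinationPort(3389),
+ }
+}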
+
+// These attributes may be used for any disk related operation.
+const (
+ // DiskIoDirectionKey is the attribute Key conforming to the
+ // "disk.io.direction" semantic conventions. It represents the disk IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read'
+ DiskIoDirectionKey = attribute.Key("disk.io.direction")
+)
+
+var (
+ // read
+ DiskIoDirectionRead = DiskIoDirectionKey.String("read")
+ // write
+ DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
+)
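+
+// Editor's note: an illustrative sketch (not part of the upstream file)
+// selecting one of the disk.io.direction enum values above for a disk
+// operation.
+func exampleDiskIoDirection(read bool) attribute.KeyValue {
+ if read {
+  return DiskIoDirectionRead
+ }
+ return DiskIoDirectionWrite
+}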
+
+// The shared attributes used to report an error.
+const (
+ // ErrorTypeKey is the attribute Key conforming to the "error.type"
+ // semantic conventions. It describes the class of error the operation
+ // ended with.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'timeout', 'java.net.UnknownHostException',
+ // 'server_certificate_invalid', '500'
+ // Note: The `error.type` SHOULD be predictable and SHOULD have low
+ // cardinality.
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library
+ // SHOULD be low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query
+ // time when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT
+ // set `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as
+ // HTTP or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // * Use a domain-specific attribute
+ // * Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a custom value
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
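+
+// Editor's note: an illustrative sketch (not part of the upstream file) of
+// the error.type guidance above: a low-cardinality, instrumentation-defined
+// class when one is known, falling back to _OTHER otherwise. The errClass
+// parameter is hypothetical.
+func exampleErrorTypeAttr(errClass string) attribute.KeyValue {
+ if errClass == "" {
+  // The instrumentation doesn't define a custom value: use the fallback.
+  return ErrorTypeOther
+ }
+ return ErrorTypeKey.String(errClass)
+}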
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example for recording span
+ // exceptions](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
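+
+// Editor's note: an illustrative sketch (not part of the upstream file) of
+// the attribute set an instrumentation might attach to an exception event,
+// with exception.escaped set when the span is ended while the exception is
+// still logically "in flight", as described above.
+func exampleExceptionAttrs(typ, msg, stack string, escaped bool) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ExceptionType(typ),
+  ExceptionMessage(msg),
+  ExceptionStacktrace(stack),
+  ExceptionEscaped(escaped),
+ }
+}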
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the hTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods; it is
+ // not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the
+ // ordinal number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending (e.g.
+ // redirection, authorization failure, 503 Server Unavailable, network
+ // issues, or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size
+ // of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For responses using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status
+ // code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 200
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route, that is, the path
+ // template in the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework, as the route attribute should have low cardinality
+ // and the URI path cannot substitute for it.
+ // SHOULD include the [application
+ // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+var (
+ // CONNECT method
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
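+
+// Editor's note: an illustrative sketch (not part of the upstream file) of
+// the normalization rule documented above: an unrecognized method MUST be
+// reported as _OTHER, with the original value preserved in
+// http.request.method_original. Matching is case-sensitive, as required.
+func exampleNormalizeRequestMethod(method string) []attribute.KeyValue {
+ switch method {
+ case "CONNECT", "DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT", "TRACE":
+  return []attribute.KeyValue{HTTPRequestMethodKey.String(method)}
+ }
+ return []attribute.KeyValue{
+  HTTPRequestMethodOther,
+  HTTPRequestMethodOriginal(method),
+ }
+}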
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For responses using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
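+
+// Editor's note: an illustrative sketch (not part of the upstream file)
+// combining the stable HTTP helpers above into a typical server-span
+// attribute set; the route template and status value are hypothetical.
+func exampleHTTPServerAttrs(status int) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  HTTPRequestMethodGet,
+  HTTPRoute("/users/{userID}"),
+  HTTPResponseStatusCode(status),
+ }
+}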
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client_id" semantic conventions. It represents a unique
+ // identifier for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'client-5', 'myhost@8742@s8083jm'
+ MessagingClientIDKey = attribute.Key("messaging.client_id")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If
+ // the broker doesn't have such notion, the destination name SHOULD
+ // uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+ // to the "messaging.destination_publish.anonymous" semantic conventions.
+ // It represents a boolean that is true if the publish message destination
+ // is anonymous (could be unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+ // MessagingDestinationPublishNameKey is the attribute Key conforming to
+ // the "messaging.destination_publish.name" semantic conventions. It
+ // represents the name of the original destination the message was
+ // published to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: The name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker doesn't have such notion, the original destination name
+ // SHOULD uniquely identify the broker.
+ MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+ // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+ // It represents the ordering key for a given message. If the attribute is
+ // not present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ordering_key'
+ MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the
+ // size of the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1439
+ // Note: This can refer to both the compressed or uncompressed body size.
+ // If both sizes are known, the uncompressed
+ // body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the conversation ID identifying the conversation to which the message
+ // belongs, represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents
+ // the size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2738
+ // Note: This can refer to both the compressed or uncompressed size. If
+ // both sizes are known, the uncompressed
+ // size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the it is essential for FIFO message. Messages that belong to the same
+ // message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the
+ // message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents an identifier for
+ // the messaging system being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+ // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+ MessagingOperationCreate = MessagingOperationKey.String("create")
+ // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // One or more messages are passed to a consumer. This operation refers to push-based scenarios, where consumers register callbacks that get called by messaging SDKs
+ MessagingOperationDeliver = MessagingOperationKey.String("deliver")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Apache ActiveMQ
+ MessagingSystemActivemq = MessagingSystemKey.String("activemq")
+ // Amazon Simple Queue Service (SQS)
+ MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid")
+ // Azure Event Hubs
+ MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs")
+ // Azure Service Bus
+ MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus")
+ // Google Cloud Pub/Sub
+ MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ MessagingSystemJms = MessagingSystemKey.String("jms")
+ // Apache Kafka
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // RabbitMQ
+ MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
+ // Apache RocketMQ
+ MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client_id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationPublishAnonymous returns an attribute KeyValue
+// conforming to the "messaging.destination_publish.anonymous" semantic
+// conventions. It represents a boolean that is true if the publish message
+// destination is anonymous (could be unnamed or have auto-generated name).
+func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationPublishAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationPublishName returns an attribute KeyValue conforming
+// to the "messaging.destination_publish.name" semantic conventions. It
+// represents the name of the original destination the message was published to.
+func MessagingDestinationPublishName(val string) attribute.KeyValue {
+ return MessagingDestinationPublishNameKey.String(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size
+// of the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the conversation ID identifying the conversation to which the
+// message belongs, represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
+// the "messaging.message.envelope.size" semantic conventions. It represents
+// the size of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides the
+// message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
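+
+// Editor's note: an illustrative sketch (not part of the upstream file) of a
+// single-message Kafka publish attribute set built from the helpers above;
+// messaging.batch.message_count is omitted because, per the note above, it
+// SHOULD NOT be set for single-message operations.
+func exampleKafkaPublishAttrs(topic, key string, partition int) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  MessagingSystemKafka,
+  MessagingOperationPublish,
+  MessagingDestinationName(topic),
+  MessagingKafkaMessageKey(key),
+  MessagingKafkaDestinationPartition(partition),
+ }
+}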
+
+// These attributes may be used for any network related operation.
+const (
+ // NetworkCarrierIccKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier
+ // network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'DE'
+ NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMccKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '310'
+ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMncKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '001'
+ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'sprint'
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'LTE'
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the
+ // internet connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'wifi'
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkIoDirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network
+ // IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'transmit'
+ NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the
+ // "network.peer.port" semantic conventions. It represents the peer port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the [OSI
+ // application layer](https://osi-model.com/application-layer/) or non-OSI
+ // equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the
+ // version of the protocol specified in `network.protocol.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `network.protocol.version` refers to the version of the protocol
+ // used and might be different from the protocol client's version. If the
+ // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`,
+ // this attribute should be set to `1.1`.
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the [OSI
+ // transport layer](https://osi-model.com/transport-layer/) or
+ // [inter-process communication
+ // method](https://wikipedia.org/wiki/Inter-process_communication).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tcp', 'udp'
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example,
+ // different processes could be listening on TCP port 12345 and UDP port
+ // 12345.
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type"
+ // semantic conventions. It represents the [OSI network
+ // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ipv4', 'ipv6'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkTypeKey = attribute.Key("network.type")
+)
+
+var (
+ // GPRS
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+ // wifi
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+ // transmit
+ NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
+ // receive
+ NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
+)
+
+var (
+ // TCP
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+ // IPv4
+ NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+ return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+ return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+ return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+ return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+ return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+ return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+ return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the version
+// of the protocol specified in `network.protocol.name`.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
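
// Editor's sketch (not part of the vendored file): describing one side of an
// HTTP-over-TCP connection with the network.* helpers and enum values above.
// Per the network.transport note, the transport is recorded alongside the
// port, since a port number alone is ambiguous. Values are hypothetical.
var exampleNetworkAttrs = []attribute.KeyValue{
	NetworkTransportTCP,
	NetworkTypeIpv4,
	NetworkPeerAddress("10.1.2.80"),
	NetworkPeerPort(12345),
	NetworkProtocolName("http"),
	NetworkProtocolVersion("1.1"),
}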
+
+// Attributes for remote procedure calls.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of the response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of the response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of the request or response. Since the protocol allows the id to
+ // be an int, a string, `null`, or missing (for notifications), the value is
+ // expected to be cast to a string for simplicity. Use an empty string for a
+ // `null` value. Omit the attribute entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version, as in the `jsonrpc` property of the request/response. Since
+ // JSON-RPC 1.0 doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of the response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of the response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of the request or response. Since the protocol allows the id to be
+// an int, a string, `null`, or missing (for notifications), the value is
+// expected to be cast to a string for simplicity. Use an empty string for a
+// `null` value. Omit the attribute entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version, as in the `jsonrpc` property of the request/response. Since
+// JSON-RPC 1.0 doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
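
// Editor's sketch (not part of the vendored file): attributes for a gRPC
// client span. Per the rpc.service and rpc.method notes above, the span name
// for this call would be "myservice.EchoService/exampleMethod"; the service
// and method names are hypothetical.
var exampleGRPCAttrs = []attribute.KeyValue{
	RPCSystemGRPC,
	RPCService("myservice.EchoService"),
	RPCMethod("exampleMethod"),
	RPCGRPCStatusCodeOk,
}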
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.address` SHOULD represent the server address
+ // behind any intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port"
+ // semantic conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.port` SHOULD represent the server port behind
+ // any intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the server domain name
+// if available without reverse DNS lookup; otherwise, IP address or Unix
+// domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
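
// Editor's sketch (not part of the vendored file): server attributes as
// observed from a client. Per the notes above, when a proxy sits in between,
// these should still identify the server behind the intermediary, if known.
var exampleServerAttrs = []attribute.KeyValue{
	ServerAddress("example.com"),
	ServerPort(443),
}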
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the destination side, and when communicating
+ // through an intermediary, `source.address` SHOULD represent the source
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port"
+ // semantic conventions. It represents the source port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// Semantic convention attributes in the TLS namespace.
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+ // semantic conventions. It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+ // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually exclusive with `client.certificate_chain`, since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client. This is usually mutually exclusive with
+ // `client.certificate`, since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of the DER-encoded version
+ // of the certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of the DER-encoded version
+ // of the certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of the DER-encoded
+ // version of the certificate offered by the client. For consistency with
+ // other hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the
+ // "tls.client.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based
+ // on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/Time
+ // indicating when client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the
+ // date/Time indicating when client certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+ // "tls.client.server_name" semantic conventions. It represents the also
+ // called an SNI, this tells the server which hostname to which the client
+ // is attempting to connect to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions. It represents the boolean flag
+ // indicating if the TLS negotiation was successful and transitioned to an
+ // encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the
+ // "tls.next_protocol" semantic conventions. It represents the string
+ // indicating the protocol being tunneled. Per the values in the [IANA
+ // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+ // this string should be lower case.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'http/1.1'
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the
+ // "tls.protocol.name" semantic conventions. It represents the normalized
+ // lowercase protocol name parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions. It represents the numeric
+ // part of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2', '3'
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+ // semantic conventions. It represents the boolean flag indicating if this
+ // TLS connection was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the server. This is
+ // usually mutually exclusive with `server.certificate_chain`, since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the server. This is usually mutually exclusive with
+ // `server.certificate`, since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of the DER-encoded version
+ // of the certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of the DER-encoded version
+ // of the certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of the DER-encoded
+ // version of the certificate offered by the server. For consistency with
+ // other hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the
+ // "tls.server.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the
+ // "tls.server.ja3s" semantic conventions. It represents a hash that
+ // identifies servers based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the
+ // date/Time indicating when server certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+ // ssl
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually exclusive with `client.certificate`,
+// since that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the also called
+// an SNI, this tells the server which hostname to which the client is
+// attempting to connect to.
+func TLSClientServerName(val string) attribute.KeyValue {
+ return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually exclusive with `server.certificate`,
+// since that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
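
// Editor's sketch (not part of the vendored file; it assumes an additional
// "crypto/tls" import): deriving a few of the tls.* attributes above from a
// completed Go TLS handshake. tls.CipherSuiteName returns the IANA suite
// description that tls.cipher expects.
func exampleTLSAttributes(cs tls.ConnectionState) []attribute.KeyValue {
	return []attribute.KeyValue{
		TLSCipher(tls.CipherSuiteName(cs.CipherSuite)),
		TLSNextProtocol(cs.NegotiatedProtocol),
		TLSEstablished(cs.HandshakeComplete),
		TLSResumed(cs.DidResume),
	}
}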
+
+// Attributes describing a URL.
+const (
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment"
+ // semantic conventions. It represents the [URI
+ // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'SemConv'
+ URLFragmentKey = attribute.Key("url.fragment")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network
+ // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // '//localhost'
+ // Note: For network calls, the URL usually has the
+ // `scheme://host[:port][path][?query][#fragment]` format, where the
+ // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+ // included nevertheless.
+ // `url.full` MUST NOT contain credentials passed via the URL in the form of
+ // `https://username:password@www.example.com/`. In such a case, the username
+ // and password SHOULD be redacted and the attribute's value SHOULD be
+ // `https://REDACTED:REDACTED@www.example.com/`.
+ // `url.full` SHOULD capture the absolute URL when it is available (or can
+ // be reconstructed) and SHOULD NOT be validated or modified except for
+ // sanitizing purposes.
+ URLFullKey = attribute.Key("url.full")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ URLPathKey = attribute.Key("url.path")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in the query string SHOULD be scrubbed
+ // when instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+)
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component.
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986).
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component.
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component.
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
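
// Editor's sketch (not part of the vendored file; it assumes an additional
// "net/url" import): splitting a parsed URL into the url.* attributes above.
// Per the url.query note, sensitive query values should be scrubbed before
// this point.
func exampleURLAttributes(u *url.URL) []attribute.KeyValue {
	return []attribute.KeyValue{
		URLScheme(u.Scheme),
		URLPath(u.Path),
		URLQuery(u.RawQuery),
		URLFragment(u.Fragment),
	}
}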
+
+// Describes user-agent attributes.
+const (
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
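
// Editor's sketch (not part of the vendored file; it assumes an additional
// "net/http" import): the User-Agent header is recorded verbatim, without
// parsing or normalization.
func exampleUserAgentAttr(r *http.Request) attribute.KeyValue {
	return UserAgentOriginal(r.UserAgent())
}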
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches its end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id"
+ // semantic conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
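+
+// A minimal usage sketch of the session linking described above: after a
+// timeout rotates the session, telemetry carries both the new and the
+// previous identifier. Both IDs are illustrative.
+func exampleSessionAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SessionID("11223344-5566-7788-99aa-bbccddeeff00"),
+		SessionPreviousID("00112233-4455-6677-8899-aabbccddeeff"),
+	}
+}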
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
new file mode 100644
index 000000000..9b802db27
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.24.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
new file mode 100644
index 000000000..cd3c71629
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
@@ -0,0 +1,211 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This event represents an occurrence of a lifecycle transition on the iOS
+// platform.
+const (
+ // IosStateKey is the attribute Key conforming to the "ios.state" semantic
+ // conventions. It represents the state the application has transitioned
+ // into at the occurrence of the event.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
+ // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
+ // from which the `OS terminology` column values are derived.
+ IosStateKey = attribute.Key("ios.state")
+)
+
+var (
+ // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
+ IosStateActive = IosStateKey.String("active")
+ // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
+ IosStateInactive = IosStateKey.String("inactive")
+ // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
+ IosStateBackground = IosStateKey.String("background")
+ // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
+ IosStateForeground = IosStateKey.String("foreground")
+ // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
+ IosStateTerminate = IosStateKey.String("terminate")
+)
+
+// This event represents an occurrence of a lifecycle transition on the Android
+// platform.
+const (
+ // AndroidStateKey is the attribute Key conforming to the "android.state"
+ // semantic conventions. It represents the state the application has
+ // transitioned into at the occurrence of the event.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Note: The Android lifecycle states are defined in [Activity lifecycle
+ // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+ // from which the `OS identifiers` are derived.
+ AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+ AndroidStateCreated = AndroidStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+ AndroidStateBackground = AndroidStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+ AndroidStateForeground = AndroidStateKey.String("foreground")
+)
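+
+// A minimal usage sketch: an instrumentation would attach exactly one of the
+// state members above to the lifecycle event it emits. The boolean switch
+// here is illustrative only.
+func exampleLifecycleStateAttribute(onIOS bool) attribute.KeyValue {
+	if onIOS {
+		return IosStateBackground
+	}
+	return AndroidStateBackground
+}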
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
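+
+// A minimal usage sketch: the attributes a flag-evaluation event could carry
+// under this convention. The flag key, provider, and variant values are
+// illustrative; "red" stands in for a concrete value such as "#c05543".
+func exampleFeatureFlagEventAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		FeatureFlagKey("logo-color"),
+		FeatureFlagProviderName("Flag Manager"),
+		FeatureFlagVariant("red"),
+	}
+}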
+
+// RPC received/sent message.
+const (
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It represents the message ID, which MUST be
+ // calculated as two different counters starting from `1`: one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents the message ID, which MUST be
+// calculated as two different counters starting from `1`: one for sent
+// messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
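+
+// A minimal usage sketch: attributes for the n-th message sent on an RPC,
+// following the per-direction counter rule above. The sizes are illustrative.
+func exampleRPCMessageEventAttributes(n int) []attribute.KeyValue {
+	return []attribute.KeyValue{
+		MessageTypeSent,
+		MessageID(n), // counts sent messages only, starting at 1
+		MessageCompressedSize(512),
+		MessageUncompressedSize(1024),
+	}
+}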
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
new file mode 100644
index 000000000..ef9bbd37a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
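+
+// A minimal usage sketch (assuming a trace.Span named span is in scope, and
+// using the exception.type/exception.message attribute keys defined
+// elsewhere in this package):
+//
+//	span.AddEvent(ExceptionEventName, trace.WithAttributes(
+//		ExceptionTypeKey.String("*fs.PathError"),
+//		ExceptionMessageKey.String("file does not exist"),
+//	))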
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
new file mode 100644
index 000000000..69eda1959
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
@@ -0,0 +1,2556 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Bare Metal Solution (BMS)
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running in.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
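+
+// A minimal usage sketch: a cloud resource description assembled from the
+// helpers and enum members above. All values are illustrative.
+func exampleCloudResourceAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CloudProviderAWS,
+		CloudPlatformAWSEC2,
+		CloudRegion("us-east-1"),
+		CloudAvailabilityZone("us-east-1c"),
+		CloudAccountID("111111111111"),
+	}
+}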
+
+// A container instance.
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) run by the
+ // container. [2]
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string representing the full
+ // command. [2]
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime
+ // specific image identifier, usually a hash algorithm name followed by a
+ // digest.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the
+ // repo digests of the container image as provided by the container
+ // runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+ // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note:
+ // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+ // and
+ // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+ // report those under the `RepoDigests` field.
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image
+ // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+ // Should be only the `<tag>` section of the full name, for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier, usually a hash algorithm name followed by a
+// digest.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name, for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
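+
+// A minimal usage sketch: describing a container instance with the helpers
+// above. All values are illustrative.
+func exampleContainerResourceAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ContainerName("opentelemetry-autoconf"),
+		ContainerID("a3bf90e006b2"),
+		ContainerRuntime("containerd"),
+		ContainerImageName("gcr.io/opentelemetry/operator"),
+		ContainerImageTags("v1.27.1"),
+		ContainerCommandArgs("otelcontribcol", "--config", "config.yaml"),
+	}
+}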
+
+// Describes device attributes.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine-readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human-readable version of
+ // the device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
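+
+// A minimal usage sketch: the device identity attributes above for a single
+// (illustrative) handset.
+func exampleDeviceResourceAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
+		DeviceManufacturer("Samsung"),
+		DeviceModelIdentifier("SM-G920F"),
+		DeviceModelName("Samsung Galaxy S6"),
+	}
+}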
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+ // of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the
+ // "host.cpu.family" semantic conventions. It represents the family or
+ // generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the
+ // "host.cpu.model.id" semantic conventions. It represents the model
+ // identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: The [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+ // ID string in EBX, EDX and ECX registers. Writing these to memory in this
+ // order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC
+ // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
+ // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
+ // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
+ // as hyphen-separated octets in uppercase hexadecimal form from most to
+ // least significant.
+ HostMacKey = attribute.Key("host.mac")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+ return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or
+// generation of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+ return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model
+// identifier. It provides more granular information about the CPU,
+// distinguishing it from other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+ return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val int) attribute.KeyValue {
+ return HostCPUSteppingKey.Int(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
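+
+// A minimal usage sketch: a host resource description using the helpers and
+// architecture enum above. All values are illustrative.
+func exampleHostResourceAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		HostName("opentelemetry-test"),
+		HostArchAMD64,
+		HostID("fdbf79e8af94cb7f9e8df36789187052"),
+		HostType("n1-standard-1"),
+		HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"),
+	}
+}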
+
+// Kubernetes resource attributes.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S doesn't have support for obtaining a cluster ID. If this is
+ // ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html),
+ // which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod.
+ // The container runtime usually uses a different globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, must be unique within a Pod. Container
+// runtime usually uses different globally unique name (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
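+
+// Illustrative sketch, not part of the generated conventions: the helpers
+// above compose into a plain attribute list describing a Kubernetes
+// workload, using the example values from the documentation above.
+//
+//	attrs := []attribute.KeyValue{
+//		K8SNamespaceName("default"),
+//		K8SDeploymentName("opentelemetry"),
+//		K8SPodName("opentelemetry-pod-autoconf"),
+//		K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+//	}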
+
+// An OCI image manifest.
+const (
+ // OciManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of
+ // the OCI image manifest. For container images specifically is the digest
+ // by which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+ // Note: Follows [OCI Image Manifest
+ // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+ // and specifically the [Digest
+ // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+ // An example can be found in [Example Image
+ // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+ OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically is the digest by which
+// the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+ return OciManifestDigestKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+ // semantic conventions. It represents the unique identifier for a
+ // particular build or compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human-readable (not intended to
+ // be parsed) OS version information, as reported by e.g. the `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human-readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, like e.g. reported by `ver`
+// or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human-readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
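+
+// Illustrative sketch, not part of the generated conventions: the OS
+// attributes combine an enum value (os.type) with string helpers, e.g. for
+// an Ubuntu host:
+//
+//	attrs := []attribute.KeyValue{
+//		OSTypeLinux,
+//		OSName("Ubuntu"),
+//		OSVersion("18.04.1"),
+//		OSDescription("Ubuntu 18.04.1 LTS"),
+//	}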
+
+// An operating system process.
+const (
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
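+
+// Illustrative sketch, not part of the generated conventions: following the
+// guidance above, report process.command_args rather than assembling
+// process.command_line just for monitoring (assumes the standard library
+// "os" package):
+//
+//	attrs := []attribute.KeyValue{
+//		ProcessPID(os.Getpid()),
+//		ProcessParentPID(os.Getppid()),
+//		ProcessCommandArgs(os.Args...),
+//	}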
+
+// The Android platform on which the Android application is running.
+const (
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+ // identifies the framework API revision offered by a version
+ // (`os.version`) of the android operating system. More information can be
+ // found
+ // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '33', '32'
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // pairs, each separated by a space.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
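+
+// Illustrative sketch, not part of the generated conventions: BrowserBrands
+// is variadic, mirroring the navigator.userAgentData.brands array:
+//
+//	attrs := []attribute.KeyValue{
+//		BrowserBrands(" Not A;Brand 99", "Chromium 99", "Chrome 99"),
+//		BrowserLanguage("en-US"),
+//		BrowserMobile(false),
+//		BrowserPlatform("macOS"),
+//	}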
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
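+
+// Illustrative sketch, not part of the generated conventions: the launch
+// type is an Enum, so one of the predefined values above is used instead of
+// a constructor helper:
+//
+//	attrs := []attribute.KeyValue{
+//		AWSECSLaunchtypeFargate,
+//		AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//		AWSECSTaskFamily("opentelemetry-family"),
+//		AWSECSTaskRevision("8"),
+//	}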
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
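+
+// Illustrative sketch, not part of the generated conventions: the log
+// helpers are variadic because an application (e.g. one with sidecar
+// containers) may write to several groups and streams:
+//
+//	attrs := []attribute.KeyValue{
+//		AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"),
+//		AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+//	}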
+
+// Resource used by Google Cloud Run.
+const (
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the
+ // name of the Cloud Run
+ // [execution](https://cloud.google.com/run/docs/managing/job-executions)
+ // being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'job-name-xxxx', 'sample-job-mdw84'
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+ // index for a task within an execution as provided by the
+ // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
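+
+// Illustrative sketch, not part of the generated conventions: both values
+// come straight from the documented Cloud Run environment variables
+// (assumes the standard library "os" and "strconv" packages):
+//
+//	execution := GCPCloudRunJobExecution(os.Getenv("CLOUD_RUN_EXECUTION"))
+//	if i, err := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX")); err == nil {
+//		taskIndex := GCPCloudRunJobTaskIndex(i)
+//		_, _ = execution, taskIndex
+//	}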
+
+// Resources used by Google Compute Engine (GCE).
+const (
+ // GCPGceInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the
+ // hostname of a GCE instance. This is the full value of the default or
+ // [custom
+ // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-host1234.example.com',
+ // 'sample-vm.us-west1-b.c.my-project.internal'
+ GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGceInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance
+ // name of a GCE instance. This is the value provided by `host.name`, the
+ // visible name of the instance in the Cloud Console UI, and the prefix for
+ // the default hostname of the instance as defined by the [default internal
+ // DNS
+ // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-1', 'my-vm-name'
+ GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+ return GCPGceInstanceHostnameKey.String(val)
+}
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+ return GCPGceInstanceNameKey.String(val)
+}
+
+// Heroku dyno metadata
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'staging', 'production'
+ // Note: `deployment.environment` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id`
+ // resource attributes.
+ // This implies that resources carrying the following attribute
+ // combinations MUST be
+ // considered to be identifying the same service:
+ //
+ // * `service.name=frontend`, `deployment.environment=production`
+ // * `service.name=frontend`, `deployment.environment=staging`.
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// A serverless instance.
+const (
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, which will potentially be reused for other invocations of the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, which will potentially be reused for other
+// invocations of the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
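+
+// Illustrative sketch, not part of the generated conventions: per the note
+// on "faas.max_memory", AWS Lambda reports its memory limit in MiB, so the
+// value is converted to bytes by multiplying by 1,048,576 (assumes the
+// standard library "os" and "strconv" packages):
+//
+//	if mb, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
+//		maxMemory := FaaSMaxMemory(mb * 1_048_576) // e.g. 128 MiB -> 134217728 bytes
+//		_ = maxMemory
+//	}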
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation. The format is not defined by these
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2.0.0', 'a01dbef8a'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
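+
+// Illustrative sketch, not part of the generated conventions: the fallback
+// rule documented for "service.name" could be implemented by a hypothetical
+// helper such as:
+//
+//	func serviceNameOrFallback(configured, executable string) attribute.KeyValue {
+//		if configured != "" {
+//			return ServiceName(configured)
+//		}
+//		if executable != "" {
+//			return ServiceName("unknown_service:" + executable) // e.g. unknown_service:bash
+//		}
+//		return ServiceName("unknown_service")
+//	}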
+
+// A service instance.
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-k8s-pod-deployment-1',
+ // '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance; however, it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). A zero-length namespace string is assumed equal to an
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is
+ // used, this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module
+ // name of this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this
+ // case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
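+
+// Illustrative sketch, not part of the generated conventions: for a Go SDK,
+// the three required telemetry.sdk.* attributes would typically be:
+//
+//	attrs := []attribute.KeyValue{
+//		TelemetrySDKLanguageGo,
+//		TelemetrySDKName("opentelemetry"),
+//		TelemetrySDKVersion("1.2.3"),
+//	}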
+
+// The telemetry distribution, if any, used alongside the SDK to capture data
+// recorded by the instrumentation libraries.
+const (
+ // TelemetryDistroNameKey is the attribute Key conforming to the
+ // "telemetry.distro.name" semantic conventions. It represents the name of
+ // the auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'parts-unlimited-java'
+ // Note: Official auto instrumentation agents and distributions SHOULD set
+ // the `telemetry.distro.name` attribute to
+ // a string starting with `opentelemetry-`, e.g.
+ // `opentelemetry-java-instrumentation`.
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+ // TelemetryDistroVersionKey is the attribute Key conforming to the
+ // "telemetry.distro.version" semantic conventions. It represents the
+ // version string of the auto instrumentation agent or distribution, if
+ // used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2.3'
+ TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+)
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
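+// Illustrative sketch (not part of the generated conventions): declaring the
+// web engine on a resource, reusing the documented example values and
+// assuming the same `semconv` and `resource` imports as above:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.WebEngineName("WildFly"),
+//		semconv.WebEngineVersion("21.0.0"),
+//	)
+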
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OTelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ // Deprecated: use the `otel.scope.name` attribute.
+ OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OTelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ // Deprecated: use the `otel.scope.version` attribute.
+ OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions.
+//
+// Deprecated: use the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+ return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions.
+//
+// Deprecated: use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+ return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
new file mode 100644
index 000000000..9733ce888
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
new file mode 100644
index 000000000..397174818
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
@@ -0,0 +1,1334 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](/docs/resource/README.md#service) of the remote
+ // service. SHOULD be equal to the actual `service.name` resource attribute
+ // of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+	// is making the request under, extracted from the token or application
+	// security context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+	// the client currently possesses, extracted from the token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under, extracted from the token or
+// application security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses, extracted from the token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
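+// Illustrative sketch (not part of the generated conventions): attaching
+// enduser attributes to an active span, assuming a `trace.Span` named `span`
+// is in scope and this package is imported as `semconv`:
+//
+//	span.SetAttributes(
+//		semconv.EnduserID("username"),
+//		semconv.EnduserRole("admin"),
+//		semconv.EnduserScope("read:message write:files"),
+//	)
+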
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeStacktraceKey is the attribute Key conforming to the
+ // "code.stacktrace" semantic conventions. It represents a stacktrace as a
+ // string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'at
+ // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
+
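+// Illustrative sketch (not part of the generated conventions): populating
+// code.* attributes from the Go runtime, assuming a `trace.Span` named
+// `span` is in scope. `runtime.FuncForPC` returns the fully qualified name,
+// so splitting it into `code.namespace` and `code.function` is omitted here:
+//
+//	if pc, file, line, ok := runtime.Caller(0); ok {
+//		span.SetAttributes(
+//			semconv.CodeFilepath(file),
+//			semconv.CodeLineNumber(line),
+//			semconv.CodeFunction(runtime.FuncForPC(pc).Name()),
+//		)
+//	}
+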
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` endpoint, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` endpoint, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
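+// Illustrative sketch (not part of the generated conventions): recording the
+// invoked ARN inside a Lambda handler, assuming a `trace.Span` named `span`
+// and a `context.Context` named `ctx` are in scope, and that
+// `github.com/aws/aws-lambda-go/lambdacontext` is available:
+//
+//	if lc, ok := lambdacontext.FromContext(ctx); ok {
+//		span.SetAttributes(semconv.AWSLambdaInvokedARN(lc.InvokedFunctionArn))
+//	}
+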
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+	// that uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+	// that identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+	// that contains a value describing the type of event related to the
+	// originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// that uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// that identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// that contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
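+// Illustrative sketch (not part of the generated conventions): annotating a
+// span while handling a CloudEvent, reusing the documented example values and
+// assuming a `trace.Span` named `span` is in scope:
+//
+//	span.SetAttributes(
+//		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
+//		semconv.CloudeventsEventSpecVersion("1.0"),
+//		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
+//	)
+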
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 this corresponds to the bucket name,
+	// and in Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It represents the type
+	// of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB
+// to the database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
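+// Illustrative sketch (not part of the generated conventions): attributes for
+// a datasource-triggered invocation, formatting the access time as ISO 8601
+// in UTC via `time.RFC3339` and assuming a `trace.Span` named `span`:
+//
+//	span.SetAttributes(
+//		semconv.FaaSDocumentCollection("myBucketName"),
+//		semconv.FaaSDocumentOperationInsert,
+//		semconv.FaaSDocumentName("myFile.txt"),
+//		semconv.FaaSDocumentTime(time.Now().UTC().Format(time.RFC3339)),
+//	)
+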
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+)
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
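+// Illustrative sketch (not part of the generated conventions): attributes for
+// a timer-triggered invocation, reusing the documented example values and the
+// same `time.RFC3339` formatting as above:
+//
+//	span.SetAttributes(
+//		semconv.FaaSCron("0/5 * * * ? *"),
+//		semconv.FaaSTime(time.Now().UTC().Format(time.RFC3339)),
+//	)
+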
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+)
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
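+// Illustrative sketch (not part of the generated conventions): a span for a
+// DynamoDB request combining several of the attributes above, reusing the
+// documented example values and assuming a `trace.Span` named `span`:
+//
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBTableNames("Users"),
+//		semconv.AWSDynamoDBConsistentRead(true),
+//		semconv.AWSDynamoDBLimit(10),
+//		semconv.AWSDynamoDBProjection("Title, Price, Color"),
+//	)
+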
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+	// `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the
+	// number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
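+// Illustrative sketch (not part of the generated conventions): a parallel
+// Scan segment and its result counts, reusing the documented example values
+// and assuming a `trace.Span` named `span`:
+//
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBSegment(10),
+//		semconv.AWSDynamoDBTotalSegments(100),
+//		semconv.AWSDynamoDBCount(10),
+//		semconv.AWSDynamoDBScannedCount(50),
+//	)
+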
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+	// conventions. It represents the JSON-serialized value of each item in
+	// the `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
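
For orientation, a minimal sketch (not part of the patch) of how the generated helpers above are meant to be used; the semconv import path is an assumption based on the v1.24.0 module layout, and the global no-op TracerProvider means it runs without any exporter configured:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// The default global TracerProvider is a no-op, so no exporter
	// setup is needed for this sketch to run.
	_, span := otel.Tracer("example").Start(context.Background(), "S3.PutObject")
	defer span.End()

	// Each helper wraps attribute.Key("aws.s3.*").String/Int, so callers
	// never have to spell the raw attribute keys by hand.
	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
	)
}
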
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index d1e47ca2f..db936ba5b 100644
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -17,20 +17,14 @@ package trace // import "go.opentelemetry.io/otel/trace"
import (
"encoding/json"
"fmt"
- "regexp"
"strings"
)
const (
maxListMembers = 32
- listDelimiter = ","
-
- // based on the W3C Trace Context specification, see
- // https://www.w3.org/TR/trace-context-1/#tracestate-header
- noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*`
- withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*`
- valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
+ listDelimiters = ","
+ memberDelimiter = "="
errInvalidKey errorConst = "invalid tracestate key"
errInvalidValue errorConst = "invalid tracestate value"
@@ -39,43 +33,128 @@ const (
errDuplicate errorConst = "duplicate list-member in tracestate"
)
-var (
- noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`)
- withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`)
- valueRe = regexp.MustCompile(`^` + valueFormat + `$`)
- memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`)
-)
-
type member struct {
Key string
Value string
}
-func newMember(key, value string) (member, error) {
- if len(key) > 256 {
- return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
+// According to (chr = %x20 / nblk-chr) with
+// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E,
+// this is equivalent to (chr = %x20-2B / %x2D-3C / %x3E-7E).
+func checkValueChar(v byte) bool {
+ return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E).
+func checkValueLast(v byte) bool {
+ return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// based on the W3C Trace Context specification
+//
+// value = (0*255(chr)) nblk-chr
+// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
+// chr = %x20 / nblk-chr
+//
+// see https://www.w3.org/TR/trace-context-1/#value
+func checkValue(val string) bool {
+ n := len(val)
+ if n == 0 || n > 256 {
+ return false
+ }
+ for i := 0; i < n-1; i++ {
+ if !checkValueChar(val[i]) {
+ return false
+ }
}
- if !noTenantKeyRe.MatchString(key) {
- if !withTenantKeyRe.MatchString(key) {
- return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
+ return checkValueLast(val[n-1])
+}
+
+func checkKeyRemain(key string) bool {
+ // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+ for _, v := range key {
+ if isAlphaNum(byte(v)) {
+ continue
}
- atIndex := strings.LastIndex(key, "@")
- if atIndex > 241 || len(key)-1-atIndex > 14 {
- return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
+ switch v {
+ case '_', '-', '*', '/':
+ continue
}
+ return false
+ }
+ return true
+}
+
+// according to
+//
+// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//
+// param n is the maximum length of the remaining part: 255 for a simple-key, 13 for a system-id.
+func checkKeyPart(key string, n int) bool {
+ if len(key) == 0 {
+ return false
+ }
+ first := key[0] // key's first char
+ ret := len(key[1:]) <= n
+ ret = ret && first >= 'a' && first <= 'z'
+ return ret && checkKeyRemain(key[1:])
+}
+
+func isAlphaNum(c byte) bool {
+ if c >= 'a' && c <= 'z' {
+ return true
}
- if len(value) > 256 || !valueRe.MatchString(value) {
- return member{}, fmt.Errorf("%w: %s", errInvalidValue, value)
+ return c >= '0' && c <= '9'
+}
+
+// according to
+//
+// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+//
+// param n is the maximum length of the remaining part; callers pass exactly 240.
+func checkKeyTenant(key string, n int) bool {
+ if len(key) == 0 {
+ return false
+ }
+ return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
+}
+
+// based on the W3C Trace Context specification
+//
+// key = simple-key / multi-tenant-key
+// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// multi-tenant-key = tenant-id "@" system-id
+// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// lcalpha = %x61-7A ; a-z
+//
+// see https://www.w3.org/TR/trace-context-1/#tracestate-header.
+func checkKey(key string) bool {
+ tenant, system, ok := strings.Cut(key, "@")
+ if !ok {
+ return checkKeyPart(key, 255)
+ }
+ return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13)
+}
+
+func newMember(key, value string) (member, error) {
+ if !checkKey(key) {
+ return member{}, errInvalidKey
+ }
+ if !checkValue(value) {
+ return member{}, errInvalidValue
}
return member{Key: key, Value: value}, nil
}
func parseMember(m string) (member, error) {
- matches := memberRe.FindStringSubmatch(m)
- if len(matches) != 3 {
+ key, val, ok := strings.Cut(m, memberDelimiter)
+ if !ok {
return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
}
- result, e := newMember(matches[1], matches[2])
+ key = strings.TrimLeft(key, " \t")
+ val = strings.TrimRight(val, " \t")
+ result, e := newMember(key, val)
if e != nil {
return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
}
@@ -85,7 +164,7 @@ func parseMember(m string) (member, error) {
// String encodes member into a string compliant with the W3C Trace Context
// specification.
func (m member) String() string {
- return fmt.Sprintf("%s=%s", m.Key, m.Value)
+ return m.Key + "=" + m.Value
}
// TraceState provides additional vendor-specific trace identification
@@ -109,8 +188,8 @@ var _ json.Marshaler = TraceState{}
// ParseTraceState attempts to decode a TraceState from the passed
// string. It returns an error if the input is invalid according to the W3C
// Trace Context specification.
-func ParseTraceState(tracestate string) (TraceState, error) {
- if tracestate == "" {
+func ParseTraceState(ts string) (TraceState, error) {
+ if ts == "" {
return TraceState{}, nil
}
@@ -120,7 +199,9 @@ func ParseTraceState(tracestate string) (TraceState, error) {
var members []member
found := make(map[string]struct{})
- for _, memberStr := range strings.Split(tracestate, listDelimiter) {
+ for ts != "" {
+ var memberStr string
+ memberStr, ts, _ = strings.Cut(ts, listDelimiters)
if len(memberStr) == 0 {
continue
}
@@ -153,11 +234,29 @@ func (ts TraceState) MarshalJSON() ([]byte, error) {
// Trace Context specification. The returned string will be invalid if the
// TraceState contains any invalid members.
func (ts TraceState) String() string {
- members := make([]string, len(ts.list))
- for i, m := range ts.list {
- members[i] = m.String()
+ if len(ts.list) == 0 {
+ return ""
+ }
+ var n int
+ n += len(ts.list) // member delimiters: '='
+ n += len(ts.list) - 1 // list delimiters: ','
+ for _, mem := range ts.list {
+ n += len(mem.Key)
+ n += len(mem.Value)
}
- return strings.Join(members, listDelimiter)
+
+ var sb strings.Builder
+ sb.Grow(n)
+ _, _ = sb.WriteString(ts.list[0].Key)
+ _ = sb.WriteByte('=')
+ _, _ = sb.WriteString(ts.list[0].Value)
+ for i := 1; i < len(ts.list); i++ {
+ _ = sb.WriteByte(listDelimiters[0])
+ _, _ = sb.WriteString(ts.list[i].Key)
+ _ = sb.WriteByte('=')
+ _, _ = sb.WriteString(ts.list[i].Value)
+ }
+ return sb.String()
}
// Get returns the value paired with key from the corresponding TraceState
@@ -189,15 +288,25 @@ func (ts TraceState) Insert(key, value string) (TraceState, error) {
if err != nil {
return ts, err
}
-
- cTS := ts.Delete(key)
- if cTS.Len()+1 <= maxListMembers {
- cTS.list = append(cTS.list, member{})
+ n := len(ts.list)
+ found := n
+ for i := range ts.list {
+ if ts.list[i].Key == key {
+ found = i
+ }
+ }
+ cTS := TraceState{}
+ if found == n && n < maxListMembers {
+ cTS.list = make([]member, n+1)
+ } else {
+ cTS.list = make([]member, n)
}
- // When the number of members exceeds capacity, drop the "right-most".
- copy(cTS.list[1:], cTS.list)
cTS.list[0] = m
-
+ // When the number of members exceeds capacity, drop the "right-most".
+ copy(cTS.list[1:], ts.list[0:found])
+ if found < n {
+ copy(cTS.list[1+found:], ts.list[found+1:])
+ }
return cTS, nil
}
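
For context, a small usage sketch (not part of the patch) of the public API whose internals the hunks above rewrite; the member values are taken from the W3C Trace Context specification's examples:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// ParseTraceState validates each list member against the W3C grammar
	// that the new checkKey/checkValue helpers implement without regexp.
	ts, err := trace.ParseTraceState("rojo=00f067aa0ba902b7,congo=t61rcWkgMzE")
	if err != nil {
		panic(err)
	}

	// Insert places the (possibly pre-existing) key in the left-most
	// position, which the rewritten Insert now does with one allocation.
	ts, _ = ts.Insert("congo", "ucfJifl5GOE")
	fmt.Println(ts.String()) // congo=ucfJifl5GOE,rojo=00f067aa0ba902b7
}
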
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index 5a92f1d4b..7b2993a1f 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.20.0"
+ return "1.24.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 82366e799..1b556e678 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -14,20 +14,25 @@
module-sets:
stable-v1:
- version: v1.20.0
+ version: v1.24.0
modules:
- go.opentelemetry.io/otel
+ - go.opentelemetry.io/otel/bridge/opencensus
+ - go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/bridge/opentracing
- go.opentelemetry.io/otel/bridge/opentracing/test
- go.opentelemetry.io/otel/example/dice
- - go.opentelemetry.io/otel/example/fib
- go.opentelemetry.io/otel/example/namedtracer
+ - go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/otel-collector
- go.opentelemetry.io/otel/example/passthrough
- go.opentelemetry.io/otel/example/zipkin
+ - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+ - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/otlp/otlptrace
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+ - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace
- go.opentelemetry.io/otel/exporters/zipkin
- go.opentelemetry.io/otel/metric
@@ -35,18 +40,14 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.43.0
+ version: v0.46.0
modules:
- - go.opentelemetry.io/otel/bridge/opencensus
- - go.opentelemetry.io/otel/bridge/opencensus/test
- - go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/prometheus
- - go.opentelemetry.io/otel/example/view
- - go.opentelemetry.io/otel/exporters/otlp/otlpmetric
- - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/prometheus
- - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
+ experimental-logs:
+ version: v0.0.1-alpha
+ modules:
+ - go.opentelemetry.io/otel/log
experimental-schema:
version: v0.0.7
modules:
diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
index 51a499816..710908837 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
@@ -36,6 +36,69 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is a protobuf 'fixed32' type and is to
+// be used as a bit field. Each non-zero value defined in this enum is
+// a bit mask. To extract a bit field, for example, use an
+// expression like:
+//
+// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field; consequently, consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+ // The zero value for the enum. Should not be used for comparisons.
+ // Instead use bitwise "and" with the appropriate mask as shown above.
+ SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0
+ // Bits 0-7 are used for trace flags.
+ SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255
+)
+
+// Enum value maps for SpanFlags.
+var (
+ SpanFlags_name = map[int32]string{
+ 0: "SPAN_FLAGS_DO_NOT_USE",
+ 255: "SPAN_FLAGS_TRACE_FLAGS_MASK",
+ }
+ SpanFlags_value = map[string]int32{
+ "SPAN_FLAGS_DO_NOT_USE": 0,
+ "SPAN_FLAGS_TRACE_FLAGS_MASK": 255,
+ }
+)
+
+func (x SpanFlags) Enum() *SpanFlags {
+ p := new(SpanFlags)
+ *p = x
+ return p
+}
+
+func (x SpanFlags) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SpanFlags) Descriptor() protoreflect.EnumDescriptor {
+ return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0].Descriptor()
+}
+
+func (SpanFlags) Type() protoreflect.EnumType {
+ return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0]
+}
+
+func (x SpanFlags) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SpanFlags.Descriptor instead.
+func (SpanFlags) EnumDescriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0}
+}
+
// SpanKind is the type of span. Can be used to specify additional relationships between spans
// in addition to a parent/child relationship.
type Span_SpanKind int32
@@ -94,11 +157,11 @@ func (x Span_SpanKind) String() string {
}
func (Span_SpanKind) Descriptor() protoreflect.EnumDescriptor {
- return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0].Descriptor()
+ return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1].Descriptor()
}
func (Span_SpanKind) Type() protoreflect.EnumType {
- return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0]
+ return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1]
}
func (x Span_SpanKind) Number() protoreflect.EnumNumber {
@@ -149,11 +212,11 @@ func (x Status_StatusCode) String() string {
}
func (Status_StatusCode) Descriptor() protoreflect.EnumDescriptor {
- return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1].Descriptor()
+ return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2].Descriptor()
}
func (Status_StatusCode) Type() protoreflect.EnumType {
- return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1]
+ return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2]
}
func (x Status_StatusCode) Number() protoreflect.EnumNumber {
@@ -238,6 +301,9 @@ type ResourceSpans struct {
Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
// A list of ScopeSpans that originate from a resource.
ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the resource data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "scope_spans" field which have their own schema_url field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
@@ -308,6 +374,9 @@ type ScopeSpans struct {
Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"`
// A list of Spans that originate from an instrumentation scope.
Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the span data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all spans and span events in the "spans" field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}
@@ -394,6 +463,21 @@ type Span struct {
// The `span_id` of this span's parent span. If this is a root span, then this
// field must be empty. The ID is an 8-byte array.
ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
+ // Flags, a bit field. The 8 least significant bits are the trace
+ // flags as defined in the W3C Trace Context specification. Readers
+ // MUST NOT assume that the 24 most significant bits will be zero.
+ // To read the 8-bit W3C trace flag, use `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // When creating span messages, if the message is logically forwarded from another source
+ // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
+ // be copied as-is. If creating from a source that does not have an equivalent flags field
+ // (such as a runtime representation of an OpenTelemetry span), the high 24 bits MUST
+ // be set to zero.
+ //
+ // [Optional].
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"`
// A description of the span's operation.
//
// For example, the name can be a qualified method name or a file name
@@ -517,6 +601,13 @@ func (x *Span) GetParentSpanId() []byte {
return nil
}
+func (x *Span) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
+
func (x *Span) GetName() string {
if x != nil {
return x.Name
@@ -757,6 +848,15 @@ type Span_Link struct {
// dropped_attributes_count is the number of dropped attributes. If the value is 0,
// then no attributes were dropped.
DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+ // Flags, a bit field. The 8 least significant bits are the trace
+ // flags as defined in the W3C Trace Context specification. Readers
+ // MUST NOT assume that the 24 most significant bits will be zero.
+ // When creating new spans, the most significant 24 bits MUST be
+ // zero. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. [Optional].
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"`
}
func (x *Span_Link) Reset() {
@@ -826,6 +926,13 @@ func (x *Span_Link) GetDroppedAttributesCount() uint32 {
return 0
}
+func (x *Span_Link) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
+
var File_opentelemetry_proto_trace_v1_trace_proto protoreflect.FileDescriptor
var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{
@@ -869,7 +976,7 @@ var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{
0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05,
0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f,
0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x55, 0x72, 0x6c, 0x22, 0x9c, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a,
+ 0x61, 0x55, 0x72, 0x6c, 0x22, 0xc8, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a,
0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e,
0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49,
@@ -877,101 +984,108 @@ var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61,
0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61,
0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04,
- 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65,
- 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53,
- 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a,
- 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78,
- 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61,
- 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b,
- 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f,
- 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54,
- 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61,
- 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
- 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62,
- 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f,
- 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
- 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41,
- 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40,
- 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28,
- 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70,
- 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
- 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e,
- 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12,
- 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75,
- 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
- 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
- 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e,
- 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11,
- 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e,
- 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
- 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a,
- 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d,
- 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12,
- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
- 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
- 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18,
- 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
- 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16,
- 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
- 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xde, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12,
- 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70,
- 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61,
- 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61,
- 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
- 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
- 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a,
- 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
- 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
- 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e,
- 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e,
- 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
- 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54,
- 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f,
- 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a,
- 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e,
- 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44,
- 0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53,
- 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45,
- 0x52, 0x10, 0x05, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18,
- 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
- 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61,
- 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a,
- 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53,
- 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54,
- 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44,
- 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53,
- 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08,
- 0x01, 0x10, 0x02, 0x42, 0x77, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67,
+ 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b,
+ 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78,
+ 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06,
+ 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e,
+ 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18,
+ 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65,
+ 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a,
+ 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72,
+ 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72,
+ 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+ 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65,
+ 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
+ 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b,
+ 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70,
+ 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e,
+ 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
- 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
- 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
- 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c,
- 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12,
+ 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e,
+ 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69,
+ 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65,
+ 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+ 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xf4, 0x01, 0x0a,
+ 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64,
+ 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b,
+ 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a,
+ 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c,
+ 0x61, 0x67, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64,
+ 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e,
+ 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53,
+ 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41,
+ 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44,
+ 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41,
+ 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12,
+ 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f,
+ 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f,
+ 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22,
+ 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43,
+ 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55,
+ 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x12,
+ 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b,
+ 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44,
+ 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x2a,
+ 0x48, 0x0a, 0x09, 0x53, 0x70, 0x61, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x19, 0x0a, 0x15,
+ 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f,
+ 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1b, 0x53, 0x50, 0x41, 0x4e, 0x5f,
+ 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, 0x41, 0x47,
+ 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x42, 0x77, 0x0a, 0x1f, 0x69, 0x6f, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72,
+ 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e,
+ 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -986,36 +1100,37 @@ func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte {
return file_opentelemetry_proto_trace_v1_trace_proto_rawDescData
}
-var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{
- (Span_SpanKind)(0), // 0: opentelemetry.proto.trace.v1.Span.SpanKind
- (Status_StatusCode)(0), // 1: opentelemetry.proto.trace.v1.Status.StatusCode
- (*TracesData)(nil), // 2: opentelemetry.proto.trace.v1.TracesData
- (*ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans
- (*ScopeSpans)(nil), // 4: opentelemetry.proto.trace.v1.ScopeSpans
- (*Span)(nil), // 5: opentelemetry.proto.trace.v1.Span
- (*Status)(nil), // 6: opentelemetry.proto.trace.v1.Status
- (*Span_Event)(nil), // 7: opentelemetry.proto.trace.v1.Span.Event
- (*Span_Link)(nil), // 8: opentelemetry.proto.trace.v1.Span.Link
- (*v1.Resource)(nil), // 9: opentelemetry.proto.resource.v1.Resource
- (*v11.InstrumentationScope)(nil), // 10: opentelemetry.proto.common.v1.InstrumentationScope
- (*v11.KeyValue)(nil), // 11: opentelemetry.proto.common.v1.KeyValue
+ (SpanFlags)(0), // 0: opentelemetry.proto.trace.v1.SpanFlags
+ (Span_SpanKind)(0), // 1: opentelemetry.proto.trace.v1.Span.SpanKind
+ (Status_StatusCode)(0), // 2: opentelemetry.proto.trace.v1.Status.StatusCode
+ (*TracesData)(nil), // 3: opentelemetry.proto.trace.v1.TracesData
+ (*ResourceSpans)(nil), // 4: opentelemetry.proto.trace.v1.ResourceSpans
+ (*ScopeSpans)(nil), // 5: opentelemetry.proto.trace.v1.ScopeSpans
+ (*Span)(nil), // 6: opentelemetry.proto.trace.v1.Span
+ (*Status)(nil), // 7: opentelemetry.proto.trace.v1.Status
+ (*Span_Event)(nil), // 8: opentelemetry.proto.trace.v1.Span.Event
+ (*Span_Link)(nil), // 9: opentelemetry.proto.trace.v1.Span.Link
+ (*v1.Resource)(nil), // 10: opentelemetry.proto.resource.v1.Resource
+ (*v11.InstrumentationScope)(nil), // 11: opentelemetry.proto.common.v1.InstrumentationScope
+ (*v11.KeyValue)(nil), // 12: opentelemetry.proto.common.v1.KeyValue
}
var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{
- 3, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans
- 9, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource
- 4, // 2: opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans
- 10, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope
- 5, // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span
- 0, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind
- 11, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
- 7, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event
- 8, // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link
- 6, // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status
- 1, // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode
- 11, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
- 11, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 4, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans
+ 10, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource
+ 5, // 2: opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans
+ 11, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope
+ 6, // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span
+ 1, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind
+ 12, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 8, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event
+ 9, // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link
+ 7, // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status
+ 2, // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode
+ 12, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 12, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
13, // [13:13] is the sub-list for method output_type
13, // [13:13] is the sub-list for method input_type
13, // [13:13] is the sub-list for extension type_name
@@ -1119,7 +1234,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_opentelemetry_proto_trace_v1_trace_proto_rawDesc,
- NumEnums: 2,
+ NumEnums: 3,
NumMessages: 7,
NumExtensions: 0,
NumServices: 0,
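
A brief consumer-side sketch (not part of the patch) of the new Span.Flags field and SpanFlags mask defined above; the flag value is made up for illustration:

package main

import (
	"fmt"

	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)

func main() {
	// A hypothetical span with the W3C "sampled" bit set plus a high bit
	// that, per the field comment, readers must not assume is zero.
	span := &tracepb.Span{Flags: 0x00000101}

	// Mask with SPAN_FLAGS_TRACE_FLAGS_MASK instead of comparing the
	// whole field, as the generated comments instruct.
	traceFlags := span.GetFlags() & uint32(tracepb.SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK)
	fmt.Printf("trace flags: 0x%02x\n", traceFlags) // 0x01 => sampled
}
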
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index a4411c22b..b5e30cff0 100644
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -32,21 +32,13 @@ import (
"google.golang.org/grpc/resolver"
)
-type ccbMode int
-
-const (
- ccbModeActive = iota
- ccbModeIdle
- ccbModeClosed
- ccbModeExitingIdle
-)
-
// ccBalancerWrapper sits between the ClientConn and the Balancer.
//
// ccBalancerWrapper implements methods corresponding to the ones on the
// balancer.Balancer interface. The ClientConn is free to call these methods
// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
-// to the Balancer happen synchronously and in order.
+// to the Balancer happen in order by performing them in the serializer, without
+// any mutexes held.
//
// ccBalancerWrapper also implements the balancer.ClientConn interface and is
// passed to the Balancer implementations. It invokes unexported methods on the
@@ -57,87 +49,75 @@ const (
type ccBalancerWrapper struct {
// The following fields are initialized when the wrapper is created and are
// read-only afterwards, and therefore can be accessed without a mutex.
- cc *ClientConn
- opts balancer.BuildOptions
+ cc *ClientConn
+ opts balancer.BuildOptions
+ serializer *grpcsync.CallbackSerializer
+ serializerCancel context.CancelFunc
- // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a
- // mutually exclusive manner as they are scheduled in the serializer. Fields
- // accessed *only* in these serializer callbacks, can therefore be accessed
- // without a mutex.
- balancer *gracefulswitch.Balancer
+ // The following fields are only accessed within the serializer or during
+ // initialization.
curBalancerName string
+ balancer *gracefulswitch.Balancer
- // mu guards access to the below fields. Access to the serializer and its
- // cancel function needs to be mutex protected because they are overwritten
- // when the wrapper exits idle mode.
- mu sync.Mutex
- serializer *grpcsync.CallbackSerializer // To serialize all outoing calls.
- serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time.
- mode ccbMode // Tracks the current mode of the wrapper.
+ // The following field is protected by mu. Caller must take cc.mu before
+ // taking mu.
+ mu sync.Mutex
+ closed bool
}
-// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
-// is not created until the switchTo() method is invoked.
-func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
- ctx, cancel := context.WithCancel(context.Background())
+// newCCBalancerWrapper creates a new balancer wrapper in idle state. The
+// underlying balancer is not created until the switchTo() method is invoked.
+func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
+ ctx, cancel := context.WithCancel(cc.ctx)
ccb := &ccBalancerWrapper{
- cc: cc,
- opts: bopts,
+ cc: cc,
+ opts: balancer.BuildOptions{
+ DialCreds: cc.dopts.copts.TransportCredentials,
+ CredsBundle: cc.dopts.copts.CredsBundle,
+ Dialer: cc.dopts.copts.Dialer,
+ Authority: cc.authority,
+ CustomUserAgent: cc.dopts.copts.UserAgent,
+ ChannelzParentID: cc.channelzID,
+ Target: cc.parsedTarget,
+ },
serializer: grpcsync.NewCallbackSerializer(ctx),
serializerCancel: cancel,
}
- ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
+ ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
return ccb
}
// updateClientConnState is invoked by grpc to push a ClientConnState update to
-// the underlying balancer.
+// the underlying balancer. This is always executed from the serializer, so
+// it is safe to call into the balancer here.
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
- ccb.mu.Lock()
- errCh := make(chan error, 1)
- // Here and everywhere else where Schedule() is called, it is done with the
- // lock held. But the lock guards only the scheduling part. The actual
- // callback is called asynchronously without the lock being held.
- ok := ccb.serializer.Schedule(func(_ context.Context) {
- errCh <- ccb.balancer.UpdateClientConnState(*ccs)
+ errCh := make(chan error)
+ ok := ccb.serializer.Schedule(func(ctx context.Context) {
+ defer close(errCh)
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
+ err := ccb.balancer.UpdateClientConnState(*ccs)
+ if logger.V(2) && err != nil {
+ logger.Infof("error from balancer.UpdateClientConnState: %v", err)
+ }
+ errCh <- err
})
if !ok {
- // If we are unable to schedule a function with the serializer, it
- // indicates that it has been closed. A serializer is only closed when
- // the wrapper is closed or is in idle.
- ccb.mu.Unlock()
- return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer")
- }
- ccb.mu.Unlock()
-
- // We get here only if the above call to Schedule succeeds, in which case it
- // is guaranteed that the scheduled function will run. Therefore it is safe
- // to block on this channel.
- err := <-errCh
- if logger.V(2) && err != nil {
- logger.Infof("error from balancer.UpdateClientConnState: %v", err)
+ return nil
}
- return err
-}
-
-// updateSubConnState is invoked by grpc to push a subConn state update to the
-// underlying balancer.
-func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
- ccb.mu.Lock()
- ccb.serializer.Schedule(func(_ context.Context) {
- // Even though it is optional for balancers, gracefulswitch ensures
- // opts.StateListener is set, so this cannot ever be nil.
- sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
- })
- ccb.mu.Unlock()
+ return <-errCh
}
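The rewritten updateClientConnState shows the pattern used throughout this file: schedule a closure on the serializer, and have the closure either send a result or close the channel so the caller never blocks forever. Below is a self-contained toy mirroring that pattern; the serializer type here is a stand-in for the internal grpcsync.CallbackSerializer, not the real implementation.

```go
package main

import (
	"context"
	"fmt"
)

// serializer is a stand-in: callbacks run one at a time on a single
// goroutine, and Schedule reports false once the context is cancelled.
type serializer struct {
	ctx  context.Context
	work chan func(context.Context)
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{ctx: ctx, work: make(chan func(context.Context), 16)}
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case f := <-s.work:
				f(ctx)
			}
		}
	}()
	return s
}

func (s *serializer) Schedule(f func(context.Context)) bool {
	select {
	case s.work <- f:
		return true
	case <-s.ctx.Done():
		return false
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s := newSerializer(ctx)

	errCh := make(chan error)
	ok := s.Schedule(func(ctx context.Context) {
		// Closing errCh on every path means the receive below yields nil
		// instead of deadlocking if the callback bails out early.
		defer close(errCh)
		if ctx.Err() != nil {
			return
		}
		errCh <- fmt.Errorf("result computed inside the serializer")
	})
	if !ok {
		return // serializer already shut down; nothing was scheduled
	}
	fmt.Println(<-errCh)
}
```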
+// resolverError is invoked by grpc to push a resolver error to the underlying
+// balancer. The call to the balancer is executed from the serializer.
func (ccb *ccBalancerWrapper) resolverError(err error) {
- ccb.mu.Lock()
- ccb.serializer.Schedule(func(_ context.Context) {
+ ccb.serializer.Schedule(func(ctx context.Context) {
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
ccb.balancer.ResolverError(err)
})
- ccb.mu.Unlock()
}
// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
@@ -151,8 +131,10 @@ func (ccb *ccBalancerWrapper) resolverError(err error) {
// the ccBalancerWrapper keeps track of the current LB policy name, and skips
// the graceful balancer switching process if the name does not change.
func (ccb *ccBalancerWrapper) switchTo(name string) {
- ccb.mu.Lock()
- ccb.serializer.Schedule(func(_ context.Context) {
+ ccb.serializer.Schedule(func(ctx context.Context) {
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
// TODO: Other languages use case-sensitive balancer registries. We should
// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
if strings.EqualFold(ccb.curBalancerName, name) {
@@ -160,7 +142,6 @@ func (ccb *ccBalancerWrapper) switchTo(name string) {
}
ccb.buildLoadBalancingPolicy(name)
})
- ccb.mu.Unlock()
}
// buildLoadBalancingPolicy performs the following:
@@ -187,115 +168,49 @@ func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
ccb.curBalancerName = builder.Name()
}
+// close initiates async shutdown of the wrapper. cc.mu must be held when
+// calling this function. To determine whether the wrapper has finished
+// shutting down, the caller should block on ccb.serializer.Done() without
+// cc.mu held.
func (ccb *ccBalancerWrapper) close() {
- channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
- ccb.closeBalancer(ccbModeClosed)
-}
-
-// enterIdleMode is invoked by grpc when the channel enters idle mode upon
-// expiry of idle_timeout. This call blocks until the balancer is closed.
-func (ccb *ccBalancerWrapper) enterIdleMode() {
- channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode")
- ccb.closeBalancer(ccbModeIdle)
-}
-
-// closeBalancer is invoked when the channel is being closed or when it enters
-// idle mode upon expiry of idle_timeout.
-func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
ccb.mu.Lock()
- if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle {
- ccb.mu.Unlock()
- return
- }
-
- ccb.mode = m
- done := ccb.serializer.Done()
- b := ccb.balancer
- ok := ccb.serializer.Schedule(func(_ context.Context) {
- // Close the serializer to ensure that no more calls from gRPC are sent
- // to the balancer.
- ccb.serializerCancel()
- // Empty the current balancer name because we don't have a balancer
- // anymore and also so that we act on the next call to switchTo by
- // creating a new balancer specified by the new resolver.
- ccb.curBalancerName = ""
- })
- if !ok {
- ccb.mu.Unlock()
- return
- }
+ ccb.closed = true
ccb.mu.Unlock()
-
- // Give enqueued callbacks a chance to finish before closing the balancer.
- <-done
- b.Close()
-}
-
-// exitIdleMode is invoked by grpc when the channel exits idle mode either
-// because of an RPC or because of an invocation of the Connect() API. This
-// recreates the balancer that was closed previously when entering idle mode.
-//
-// If the channel is not in idle mode, we know for a fact that we are here as a
-// result of the user calling the Connect() method on the ClientConn. In this
-// case, we can simply forward the call to the underlying balancer, instructing
-// it to reconnect to the backends.
-func (ccb *ccBalancerWrapper) exitIdleMode() {
- ccb.mu.Lock()
- if ccb.mode == ccbModeClosed {
- // Request to exit idle is a no-op when wrapper is already closed.
- ccb.mu.Unlock()
- return
- }
-
- if ccb.mode == ccbModeIdle {
- // Recreate the serializer which was closed when we entered idle.
- ctx, cancel := context.WithCancel(context.Background())
- ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
- ccb.serializerCancel = cancel
- }
-
- // The ClientConn guarantees mutual exclusion between close() and
- // exitIdleMode(), and since we just created a new serializer, we can be
- // sure that the below function will be scheduled.
- done := make(chan struct{})
- ccb.serializer.Schedule(func(_ context.Context) {
- defer close(done)
-
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
-
- if ccb.mode != ccbModeIdle {
- ccb.balancer.ExitIdle()
+ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
+ ccb.serializer.Schedule(func(context.Context) {
+ if ccb.balancer == nil {
return
}
-
- // Gracefulswitch balancer does not support a switchTo operation after
- // being closed. Hence we need to create a new one here.
- ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
- ccb.mode = ccbModeActive
- channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")
-
+ ccb.balancer.Close()
+ ccb.balancer = nil
})
- ccb.mu.Unlock()
-
- <-done
+ ccb.serializerCancel()
}
-func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
- return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
+// exitIdle invokes the balancer's exitIdle method in the serializer.
+func (ccb *ccBalancerWrapper) exitIdle() {
+ ccb.serializer.Schedule(func(ctx context.Context) {
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
+ ccb.balancer.ExitIdle()
+ })
}
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
- if ccb.isIdleOrClosed() {
- return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
+ ccb.cc.mu.Lock()
+ defer ccb.cc.mu.Unlock()
+
+ ccb.mu.Lock()
+ if ccb.closed {
+ ccb.mu.Unlock()
+ return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed")
}
+ ccb.mu.Unlock()
if len(addrs) == 0 {
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
}
- ac, err := ccb.cc.newAddrConn(addrs, opts)
+ ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
if err != nil {
channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
return nil, err
@@ -316,10 +231,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
}
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
- if ccb.isIdleOrClosed() {
- return
- }
-
acbw, ok := sc.(*acBalancerWrapper)
if !ok {
return
@@ -328,25 +239,39 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
}
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
- if ccb.isIdleOrClosed() {
+ ccb.cc.mu.Lock()
+ defer ccb.cc.mu.Unlock()
+
+ ccb.mu.Lock()
+ if ccb.closed {
+ ccb.mu.Unlock()
return
}
-
+ ccb.mu.Unlock()
// Update picker before updating state. Even though the ordering here does
// not matter, it can lead to multiple calls of Pick in the common start-up
// case where we wait for ready and then perform an RPC. If the picker is
// updated later, we could call the "connecting" picker when the state is
// updated, and then call the "ready" picker after the picker gets updated.
- ccb.cc.blockingpicker.updatePicker(s.Picker)
+
+ // Note that there is no need to check if the balancer wrapper was closed,
+ // as we know the graceful switch LB policy will not call cc if it has been
+ // closed.
+ ccb.cc.pickerWrapper.updatePicker(s.Picker)
ccb.cc.csMgr.updateState(s.ConnectivityState)
}
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
- if ccb.isIdleOrClosed() {
+ ccb.cc.mu.RLock()
+ defer ccb.cc.mu.RUnlock()
+
+ ccb.mu.Lock()
+ if ccb.closed {
+ ccb.mu.Unlock()
return
}
-
- ccb.cc.resolveNow(o)
+ ccb.mu.Unlock()
+ ccb.cc.resolveNowLocked(o)
}
func (ccb *ccBalancerWrapper) Target() string {
@@ -364,6 +289,20 @@ type acBalancerWrapper struct {
producers map[balancer.ProducerBuilder]*refCountedProducer
}
+// updateState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
+ acbw.ccb.serializer.Schedule(func(ctx context.Context) {
+ if ctx.Err() != nil || acbw.ccb.balancer == nil {
+ return
+ }
+ // Even though it is optional for balancers, gracefulswitch ensures
+ // opts.StateListener is set, so this cannot ever be nil.
+ // TODO: delete this comment when UpdateSubConnState is removed.
+ acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
+ })
+}
+
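With updateSubConnState gone, subchannel state now reaches the LB policy through the StateListener that gracefulswitch installs, invoked inside the serializer as shown above. A hedged sketch of how a custom policy plugs into this path; myBalancer and handleSubConnState are hypothetical names, not part of grpc-go.

```go
package lbexample

import "google.golang.org/grpc/balancer"

type myBalancer struct {
	cc balancer.ClientConn
}

// handleSubConnState reacts to subchannel state changes (body elided).
func (b *myBalancer) handleSubConnState(sc balancer.SubConn, scs balancer.SubConnState) {}

func (b *myBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
	// sc is declared first so the listener closure can capture it; by the
	// time the listener fires, sc has been assigned.
	var sc balancer.SubConn
	var err error
	sc, err = b.cc.NewSubConn(s.ResolverState.Addresses, balancer.NewSubConnOptions{
		// Runs inside the wrapper's serializer, so it is naturally
		// serialized with the balancer's other callbacks.
		StateListener: func(scs balancer.SubConnState) { b.handleSubConnState(sc, scs) },
	})
	if err != nil {
		return err
	}
	sc.Connect()
	return nil
}
```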
func (acbw *acBalancerWrapper) String() string {
return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
}
@@ -377,20 +316,7 @@ func (acbw *acBalancerWrapper) Connect() {
}
func (acbw *acBalancerWrapper) Shutdown() {
- ccb := acbw.ccb
- if ccb.isIdleOrClosed() {
- // It is safe to ignore this call when the balancer is closed or in idle
- // because the ClientConn takes care of closing the connections.
- //
- // Not returning early from here when the balancer is closed or in idle
- // leads to a deadlock though, because of the following sequence of
- // calls when holding cc.mu:
- // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
- // ccb.RemoveAddrConn --> cc.removeAddrConn
- return
- }
-
- ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+ acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
}
// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 595480112..e9e97d451 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -430,7 +430,7 @@ type ClientHeader struct {
MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
// A single process may be used to run multiple virtual
// servers with different identities.
- // The authority is the name of such a server identitiy.
+ // The authority is the name of such a server identity.
// It is typically a portion of the URI in the form of
// <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 429c389e4..f6e815e6b 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -33,9 +33,7 @@ import (
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal"
- "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
@@ -48,9 +46,9 @@ import (
"google.golang.org/grpc/status"
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
- _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver.
_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
_ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver.
+ _ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
)
const (
@@ -119,23 +117,8 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires
}, nil
}
-// DialContext creates a client connection to the given target. By default, it's
-// a non-blocking dial (the function won't wait for connections to be
-// established, and connecting happens in the background). To make it a blocking
-// dial, use WithBlock() dial option.
-//
-// In the non-blocking case, the ctx does not act against the connection. It
-// only controls the setup steps.
-//
-// In the blocking case, ctx can be used to cancel or expire the pending
-// connection. Once this function returns, the cancellation and expiration of
-// ctx will be a no-op. Users should call ClientConn.Close to terminate all the
-// pending operations after this function returns.
-//
-// The target name syntax is defined in
-// https://github.com/grpc/grpc/blob/master/doc/naming.md.
-// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
-func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+// newClient returns a new client in idle mode.
+func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{
target: target,
conns: make(map[*addrConn]struct{}),
@@ -143,23 +126,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
czData: new(channelzData),
}
- // We start the channel off in idle mode, but kick it out of idle at the end
- // of this method, instead of waiting for the first RPC. Other gRPC
- // implementations do wait for the first RPC to kick the channel out of
- // idle. But doing so would be a major behavior change for our users who are
- // used to seeing the channel active after Dial.
- //
- // Taking this approach of kicking it out of idle at the end of this method
- // allows us to share the code between channel creation and exiting idle
- // mode. This will also make it easy for us to switch to starting the
- // channel off in idle, if at all we ever get to do that.
- cc.idlenessState = ccIdlenessStateIdle
-
cc.retryThrottler.Store((*retryThrottler)(nil))
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
cc.ctx, cc.cancel = context.WithCancel(context.Background())
- cc.exitIdleCond = sync.NewCond(&cc.mu)
+ // Apply dial options.
disableGlobalOpts := false
for _, opt := range opts {
if _, ok := opt.(*disableGlobalDialOptions); ok {
@@ -177,21 +148,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
for _, opt := range opts {
opt.apply(&cc.dopts)
}
-
chainUnaryClientInterceptors(cc)
chainStreamClientInterceptors(cc)
- defer func() {
- if err != nil {
- cc.Close()
- }
- }()
-
- // Register ClientConn with channelz.
- cc.channelzRegistration(target)
-
- cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID)
-
if err := cc.validateTransportCredentials(); err != nil {
return nil, err
}
@@ -205,10 +164,80 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
cc.mkp = cc.dopts.copts.KeepaliveParams
- if cc.dopts.copts.UserAgent != "" {
- cc.dopts.copts.UserAgent += " " + grpcUA
- } else {
- cc.dopts.copts.UserAgent = grpcUA
+ // Register ClientConn with channelz.
+ cc.channelzRegistration(target)
+
+ // TODO: Ideally it should be impossible to error from this function after
+ // channelz registration. This will require removing some channelz logs
+ // from the following functions that can error. Errors can be returned to
+ // the user, and successful logs can be emitted here, after the checks have
+ // passed and channelz is subsequently registered.
+
+ // Determine the resolver to use.
+ if err := cc.parseTargetAndFindResolver(); err != nil {
+ channelz.RemoveEntry(cc.channelzID)
+ return nil, err
+ }
+ if err = cc.determineAuthority(); err != nil {
+ channelz.RemoveEntry(cc.channelzID)
+ return nil, err
+ }
+
+ cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID)
+ cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
+
+ cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
+ cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
+ return cc, nil
+}
+
+// DialContext creates a client connection to the given target. By default, it's
+// a non-blocking dial (the function won't wait for connections to be
+// established, and connecting happens in the background). To make it a blocking
+// dial, use WithBlock() dial option.
+//
+// In the non-blocking case, the ctx does not act against the connection. It
+// only controls the setup steps.
+//
+// In the blocking case, ctx can be used to cancel or expire the pending
+// connection. Once this function returns, the cancellation and expiration of
+// ctx will be a no-op. Users should call ClientConn.Close to terminate all the
+// pending operations after this function returns.
+//
+// The target name syntax is defined in
+// https://github.com/grpc/grpc/blob/master/doc/naming.md.
+// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+ cc, err := newClient(target, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ // We start the channel off in idle mode, but kick it out of idle now,
+ // instead of waiting for the first RPC. Other gRPC implementations do wait
+ // for the first RPC to kick the channel out of idle. But doing so would be
+ // a major behavior change for our users who are used to seeing the channel
+ // active after Dial.
+ //
+ // Taking this approach of kicking it out of idle at the end of this method
+ // allows us to share the code between channel creation and exiting idle
+ // mode. This will also make it easy for us to switch to starting the
+ // channel off in idle, i.e. by making newClient exported.
+
+ defer func() {
+ if err != nil {
+ cc.Close()
+ }
+ }()
+
+ // This creates the name resolver, load balancer, etc.
+ if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+ return nil, err
+ }
+
+ // Return now for non-blocking dials.
+ if !cc.dopts.block {
+ return cc, nil
}
if cc.dopts.timeout > 0 {
@@ -231,49 +260,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}()
- if cc.dopts.bs == nil {
- cc.dopts.bs = backoff.DefaultExponential
- }
-
- // Determine the resolver to use.
- if err := cc.parseTargetAndFindResolver(); err != nil {
- return nil, err
- }
- if err = cc.determineAuthority(); err != nil {
- return nil, err
- }
-
- if cc.dopts.scChan != nil {
- // Blocking wait for the initial service config.
- select {
- case sc, ok := <-cc.dopts.scChan:
- if ok {
- cc.sc = &sc
- cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
- }
- case <-ctx.Done():
- return nil, ctx.Err()
- }
- }
- if cc.dopts.scChan != nil {
- go cc.scWatcher()
- }
-
- // This creates the name resolver, load balancer, blocking picker etc.
- if err := cc.exitIdleMode(); err != nil {
- return nil, err
- }
-
- // Configure idleness support with configured idle timeout or default idle
- // timeout duration. Idleness can be explicitly disabled by the user, by
- // setting the dial option to 0.
- cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger})
-
- // Return early for non-blocking dials.
- if !cc.dopts.block {
- return cc, nil
- }
-
// A blocking dial blocks until the clientConn is ready.
for {
s := cc.GetState()
@@ -320,8 +306,8 @@ func (cc *ClientConn) addTraceEvent(msg string) {
type idler ClientConn
-func (i *idler) EnterIdleMode() error {
- return (*ClientConn)(i).enterIdleMode()
+func (i *idler) EnterIdleMode() {
+ (*ClientConn)(i).enterIdleMode()
}
func (i *idler) ExitIdleMode() error {
@@ -329,117 +315,71 @@ func (i *idler) ExitIdleMode() error {
}
// exitIdleMode moves the channel out of idle mode by recreating the name
-// resolver and load balancer.
-func (cc *ClientConn) exitIdleMode() error {
+// resolver and load balancer. This should never be called directly; use
+// cc.idlenessMgr.ExitIdleMode instead.
+func (cc *ClientConn) exitIdleMode() (err error) {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
return errConnClosing
}
- if cc.idlenessState != ccIdlenessStateIdle {
- channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState)
- cc.mu.Unlock()
- return nil
- }
-
- defer func() {
- // When Close() and exitIdleMode() race against each other, one of the
- // following two can happen:
- // - Close() wins the race and runs first. exitIdleMode() runs after, and
- // sees that the ClientConn is already closed and hence returns early.
- // - exitIdleMode() wins the race and runs first and recreates the balancer
- // and releases the lock before recreating the resolver. If Close() runs
- // in this window, it will wait for exitIdleMode to complete.
- //
- // We achieve this synchronization using the below condition variable.
- cc.mu.Lock()
- cc.idlenessState = ccIdlenessStateActive
- cc.exitIdleCond.Signal()
- cc.mu.Unlock()
- }()
-
- cc.idlenessState = ccIdlenessStateExitingIdle
- exitedIdle := false
- if cc.blockingpicker == nil {
- cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers)
- } else {
- cc.blockingpicker.exitIdleMode()
- exitedIdle = true
- }
-
- var credsClone credentials.TransportCredentials
- if creds := cc.dopts.copts.TransportCredentials; creds != nil {
- credsClone = creds.Clone()
- }
- if cc.balancerWrapper == nil {
- cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
- DialCreds: credsClone,
- CredsBundle: cc.dopts.copts.CredsBundle,
- Dialer: cc.dopts.copts.Dialer,
- Authority: cc.authority,
- CustomUserAgent: cc.dopts.copts.UserAgent,
- ChannelzParentID: cc.channelzID,
- Target: cc.parsedTarget,
- })
- } else {
- cc.balancerWrapper.exitIdleMode()
- }
- cc.firstResolveEvent = grpcsync.NewEvent()
cc.mu.Unlock()
// This needs to be called without cc.mu because this builds a new resolver
- // which might update state or report error inline which needs to be handled
- // by cc.updateResolverState() which also grabs cc.mu.
- if err := cc.initResolverWrapper(credsClone); err != nil {
+ // which might update state or report error inline, which would then need to
+ // acquire cc.mu.
+ if err := cc.resolverWrapper.start(); err != nil {
return err
}
- if exitedIdle {
- cc.addTraceEvent("exiting idle mode")
- }
+ cc.addTraceEvent("exiting idle mode")
return nil
}
+// initIdleStateLocked initializes common state to how it should be while idle.
+func (cc *ClientConn) initIdleStateLocked() {
+ cc.resolverWrapper = newCCResolverWrapper(cc)
+ cc.balancerWrapper = newCCBalancerWrapper(cc)
+ cc.firstResolveEvent = grpcsync.NewEvent()
+ // cc.conns == nil is a proxy for the ClientConn being closed. So, instead
+ // of setting it to nil here, we recreate the map. This also means that we
+ // don't have to do this when exiting idle mode.
+ cc.conns = make(map[*addrConn]struct{})
+}
+
// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
-// name resolver, load balancer and any subchannels.
-func (cc *ClientConn) enterIdleMode() error {
+// name resolver, load balancer, and any subchannels. This should never be
+// called directly; use cc.idlenessMgr.EnterIdleMode instead.
+func (cc *ClientConn) enterIdleMode() {
cc.mu.Lock()
- defer cc.mu.Unlock()
if cc.conns == nil {
- return ErrClientConnClosing
- }
- if cc.idlenessState != ccIdlenessStateActive {
- channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState)
- return nil
+ cc.mu.Unlock()
+ return
}
- // cc.conns == nil is a proxy for the ClientConn being closed. So, instead
- // of setting it to nil here, we recreate the map. This also means that we
- // don't have to do this when exiting idle mode.
conns := cc.conns
- cc.conns = make(map[*addrConn]struct{})
- // TODO: Currently, we close the resolver wrapper upon entering idle mode
- // and create a new one upon exiting idle mode. This means that the
- // `cc.resolverWrapper` field would be overwritten everytime we exit idle
- // mode. While this means that we need to hold `cc.mu` when accessing
- // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should
- // try to do the same for the balancer and picker wrappers too.
- cc.resolverWrapper.close()
- cc.blockingpicker.enterIdleMode()
- cc.balancerWrapper.enterIdleMode()
+ rWrapper := cc.resolverWrapper
+ rWrapper.close()
+ cc.pickerWrapper.reset()
+ bWrapper := cc.balancerWrapper
+ bWrapper.close()
cc.csMgr.updateState(connectivity.Idle)
- cc.idlenessState = ccIdlenessStateIdle
cc.addTraceEvent("entering idle mode")
- go func() {
- for ac := range conns {
- ac.tearDown(errConnIdling)
- }
- }()
+ cc.initIdleStateLocked()
- return nil
+ cc.mu.Unlock()
+
+ // Block until the name resolver and LB policy are closed.
+ <-rWrapper.serializer.Done()
+ <-bWrapper.serializer.Done()
+
+ // Close all subchannels after the LB policy is closed.
+ for ac := range conns {
+ ac.tearDown(errConnIdling)
+ }
}
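enterIdleMode now tears the wrappers down through their serializers and closes subchannels only after the LB policy is gone, which is what lets the patch delete the old mode enum and condition variable. From the user's side, the feature is driven entirely by the idle timeout dial option; a hedged usage sketch (the target and timeout are illustrative):

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Idleness defaults to 30 minutes (see defaultDialOptions later in this
	// patch); a value of 0 disables it entirely.
	conn, err := grpc.Dial("dns:///greeter.example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(5*time.Minute),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	// After 5 minutes without RPCs the channel enters idle: the resolver,
	// LB policy, and subchannels are shut down until the next RPC.
}
```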
// validateTransportCredentials performs a series of checks on the configured
@@ -649,66 +589,35 @@ type ClientConn struct {
dopts dialOptions // Default and user specified dial options.
channelzID *channelz.Identifier // Channelz identifier for the channel.
resolverBuilder resolver.Builder // See parseTargetAndFindResolver().
- balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath.
- idlenessMgr idle.Manager
+ idlenessMgr *idle.Manager
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
csMgr *connectivityStateManager
- blockingpicker *pickerWrapper
+ pickerWrapper *pickerWrapper
safeConfigSelector iresolver.SafeConfigSelector
czData *channelzData
retryThrottler atomic.Value // Updated from service config.
- // firstResolveEvent is used to track whether the name resolver sent us at
- // least one update. RPCs block on this event.
- firstResolveEvent *grpcsync.Event
-
// mu protects the following fields.
// TODO: split mu so the same mutex isn't used for everything.
mu sync.RWMutex
- resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close.
+ resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close.
+ balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close.
sc *ServiceConfig // Latest service config received from the resolver.
conns map[*addrConn]struct{} // Set to nil on close.
mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
- idlenessState ccIdlenessState // Tracks idleness state of the channel.
- exitIdleCond *sync.Cond // Signalled when channel exits idle.
+ // firstResolveEvent is used to track whether the name resolver sent us at
+ // least one update. RPCs block on this event. May be accessed without mu
+ // if we know we cannot be asked to enter idle mode while accessing it (e.g.
+ // when the idle manager has already been closed, or if we are already
+ // entering idle mode).
+ firstResolveEvent *grpcsync.Event
lceMu sync.Mutex // protects lastConnectionError
lastConnectionError error
}
-// ccIdlenessState tracks the idleness state of the channel.
-//
-// Channels start off in `active` and move to `idle` after a period of
-// inactivity. When moving back to `active` upon an incoming RPC, they
-// transition through `exiting_idle`. This state is useful for synchronization
-// with Close().
-//
-// This state tracking is mostly for self-protection. The idlenessManager is
-// expected to keep track of the state as well, and is expected not to call into
-// the ClientConn unnecessarily.
-type ccIdlenessState int8
-
-const (
- ccIdlenessStateActive ccIdlenessState = iota
- ccIdlenessStateIdle
- ccIdlenessStateExitingIdle
-)
-
-func (s ccIdlenessState) String() string {
- switch s {
- case ccIdlenessStateActive:
- return "active"
- case ccIdlenessStateIdle:
- return "idle"
- case ccIdlenessStateExitingIdle:
- return "exitingIdle"
- default:
- return "unknown"
- }
-}
-
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in the former case and false in the latter.
//
@@ -748,29 +657,15 @@ func (cc *ClientConn) GetState() connectivity.State {
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
func (cc *ClientConn) Connect() {
- cc.exitIdleMode()
+ if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+ cc.addTraceEvent(err.Error())
+ return
+ }
// If the ClientConn was not in idle mode, we need to call ExitIdle on the
// LB policy so that connections can be created.
- cc.balancerWrapper.exitIdleMode()
-}
-
-func (cc *ClientConn) scWatcher() {
- for {
- select {
- case sc, ok := <-cc.dopts.scChan:
- if !ok {
- return
- }
- cc.mu.Lock()
- // TODO: load balance policy runtime change is ignored.
- // We may revisit this decision in the future.
- cc.sc = &sc
- cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
- cc.mu.Unlock()
- case <-cc.ctx.Done():
- return
- }
- }
+ cc.mu.Lock()
+ cc.balancerWrapper.exitIdle()
+ cc.mu.Unlock()
}
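Connect is now a thin layer over the idleness manager: it exits idle (recreating the resolver and LB policy if needed) and then asks the LB policy to kick off connections. A hedged helper showing the intended use together with the state API; waitReady is a hypothetical name, not part of grpc-go.

```go
package main

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

// waitReady kicks conn out of idle and blocks until it is Ready or ctx ends.
func waitReady(ctx context.Context, conn *grpc.ClientConn) bool {
	conn.Connect() // exits idle if needed; otherwise asks the LB policy to connect
	for {
		s := conn.GetState()
		if s == connectivity.Ready {
			return true
		}
		if !conn.WaitForStateChange(ctx, s) {
			return false // ctx expired before the state changed
		}
	}
}

func main() {
	conn, err := grpc.Dial("passthrough:///localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	_ = waitReady(ctx, conn)
}
```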
// waitForResolvedAddrs blocks until the resolver has provided addresses or the
@@ -804,11 +699,11 @@ func init() {
internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
return cc.csMgr.pubSub.Subscribe(s)
}
- internal.EnterIdleModeForTesting = func(cc *ClientConn) error {
- return cc.enterIdleMode()
+ internal.EnterIdleModeForTesting = func(cc *ClientConn) {
+ cc.idlenessMgr.EnterIdleModeForTesting()
}
internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
- return cc.exitIdleMode()
+ return cc.idlenessMgr.ExitIdleMode()
}
}
@@ -824,9 +719,8 @@ func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
}
}
-func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
+func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error {
defer cc.firstResolveEvent.Fire()
- cc.mu.Lock()
// Check if the ClientConn is already closed. Some fields (e.g.
// balancerWrapper) are set to nil when closing the ClientConn, and could
// cause nil pointer panic if we don't have this check.
@@ -872,7 +766,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
if cc.sc == nil {
// Apply the failing LB only if we haven't received valid service config
// from the name resolver in the past.
- cc.applyFailingLB(s.ServiceConfig)
+ cc.applyFailingLBLocked(s.ServiceConfig)
cc.mu.Unlock()
return ret
}
@@ -894,15 +788,13 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
return ret
}
-// applyFailingLB is akin to configuring an LB policy on the channel which
+// applyFailingLBLocked is akin to configuring an LB policy on the channel which
// always fails RPCs. Here, an actual LB policy is not configured, but an always
// erroring picker is configured, which returns errors with information about
// what was invalid in the received service config. A config selector with no
// service config is configured, and the connectivity state of the channel is
// set to TransientFailure.
-//
-// Caller must hold cc.mu.
-func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) {
+func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
var err error
if sc.Err != nil {
err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
@@ -910,14 +802,10 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) {
err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
}
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
- cc.blockingpicker.updatePicker(base.NewErrPicker(err))
+ cc.pickerWrapper.updatePicker(base.NewErrPicker(err))
cc.csMgr.updateState(connectivity.TransientFailure)
}
-func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
- cc.balancerWrapper.updateSubConnState(sc, s, err)
-}
-
// Makes a copy of the input addresses slice and clears out the balancer
// attributes field. Addresses are passed during subconn creation and address
// update operations. In both cases, we will clear the balancer attributes by
@@ -932,10 +820,14 @@ func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Ad
return out
}
-// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
+// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns.
//
// Caller needs to make sure len(addrs) > 0.
-func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+ if cc.conns == nil {
+ return nil, ErrClientConnClosing
+ }
+
ac := &addrConn{
state: connectivity.Idle,
cc: cc,
@@ -947,12 +839,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
stateChan: make(chan struct{}),
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
- // Track ac in cc. This needs to be done before any getTransport(...) is called.
- cc.mu.Lock()
- defer cc.mu.Unlock()
- if cc.conns == nil {
- return nil, ErrClientConnClosing
- }
var err error
ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "")
@@ -968,6 +854,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
},
})
+ // Track ac in cc. This needs to be done before any getTransport(...) is called.
cc.conns[ac] = struct{}{}
return ac, nil
}
@@ -1174,7 +1061,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
}
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
- return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
+ return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{
Ctx: ctx,
FullMethodName: method,
})
@@ -1216,12 +1103,12 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
cc.mu.RLock()
- r := cc.resolverWrapper
+ cc.resolverWrapper.resolveNow(o)
cc.mu.RUnlock()
- if r == nil {
- return
- }
- go r.resolveNow(o)
+}
+
+func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) {
+ cc.resolverWrapper.resolveNow(o)
}
// ResetConnectBackoff wakes up all subchannels in transient failure and causes
@@ -1253,40 +1140,32 @@ func (cc *ClientConn) Close() error {
<-cc.csMgr.pubSub.Done()
}()
+ // Prevent calls to enter/exit idle immediately, and ensure we are not
+ // currently entering/exiting idle mode.
+ cc.idlenessMgr.Close()
+
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
return ErrClientConnClosing
}
- for cc.idlenessState == ccIdlenessStateExitingIdle {
- cc.exitIdleCond.Wait()
- }
-
conns := cc.conns
cc.conns = nil
cc.csMgr.updateState(connectivity.Shutdown)
- pWrapper := cc.blockingpicker
- rWrapper := cc.resolverWrapper
- bWrapper := cc.balancerWrapper
- idlenessMgr := cc.idlenessMgr
+ // We can safely unlock and continue to access all fields now as
+ // cc.conns==nil, preventing any further operations on cc.
cc.mu.Unlock()
+ cc.resolverWrapper.close()
// The order of closing matters here since the balancer wrapper assumes the
// picker is closed before the balancer wrapper itself is closed.
- if pWrapper != nil {
- pWrapper.close()
- }
- if bWrapper != nil {
- bWrapper.close()
- }
- if rWrapper != nil {
- rWrapper.close()
- }
- if idlenessMgr != nil {
- idlenessMgr.Close()
- }
+ cc.pickerWrapper.close()
+ cc.balancerWrapper.close()
+
+ <-cc.resolverWrapper.serializer.Done()
+ <-cc.balancerWrapper.serializer.Done()
for ac := range conns {
ac.tearDown(ErrClientConnClosing)
@@ -1307,7 +1186,7 @@ type addrConn struct {
cc *ClientConn
dopts dialOptions
- acbw balancer.SubConn
+ acbw *acBalancerWrapper
scopts balancer.NewSubConnOptions
// transport is set when there's a viable transport (note: ac state may not be READY as LB channel
@@ -1345,7 +1224,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
} else {
channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
}
- ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
+ ac.acbw.updateState(s, lastErr)
}
// adjustParams updates parameters used to create transports upon
@@ -1849,7 +1728,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() error {
if err != nil {
channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
} else {
- channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
+ channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget)
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb != nil {
cc.parsedTarget = parsedTarget
@@ -1981,58 +1860,17 @@ func (cc *ClientConn) determineAuthority() error {
}
endpoint := cc.parsedTarget.Endpoint()
- target := cc.target
- switch {
- case authorityFromDialOption != "":
+ if authorityFromDialOption != "" {
cc.authority = authorityFromDialOption
- case authorityFromCreds != "":
+ } else if authorityFromCreds != "" {
cc.authority = authorityFromCreds
- case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
- // TODO: remove when the unix resolver implements optional interface to
- // return channel authority.
- cc.authority = "localhost"
- case strings.HasPrefix(endpoint, ":"):
+ } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
+ cc.authority = auth.OverrideAuthority(cc.parsedTarget)
+ } else if strings.HasPrefix(endpoint, ":") {
cc.authority = "localhost" + endpoint
- default:
- // TODO: Define an optional interface on the resolver builder to return
- // the channel authority given the user's dial target. For resolvers
- // which don't implement this interface, we will use the endpoint from
- // "scheme://authority/endpoint" as the default authority.
- // Escape the endpoint to handle use cases where the endpoint
- // might not be a valid authority by default.
- // For example an endpoint which has multiple paths like
- // 'a/b/c', which is not a valid authority by default.
+ } else {
cc.authority = encodeAuthority(endpoint)
}
channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
return nil
}
-
-// initResolverWrapper creates a ccResolverWrapper, which builds the name
-// resolver. This method grabs the lock to assign the newly built resolver
-// wrapper to the cc.resolverWrapper field.
-func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error {
- rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{
- target: cc.parsedTarget,
- builder: cc.resolverBuilder,
- bOpts: resolver.BuildOptions{
- DisableServiceConfig: cc.dopts.disableServiceConfig,
- DialCreds: creds,
- CredsBundle: cc.dopts.copts.CredsBundle,
- Dialer: cc.dopts.copts.Dialer,
- },
- channelzID: cc.channelzID,
- })
- if err != nil {
- return fmt.Errorf("failed to build resolver: %v", err)
- }
- // Resolver implementations may report state update or error inline when
- // built (or right after), and this is handled in cc.updateResolverState.
- // Also, an error from the resolver might lead to a re-resolution request
- // from the balancer, which is handled in resolveNow() where
- // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here.
- cc.mu.Lock()
- cc.resolverWrapper = rw
- cc.mu.Unlock()
- return nil
-}
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index 11b106182..08476ad1f 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -25,7 +25,13 @@ import (
"strconv"
)
-// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
+// A Code is a status code defined according to the [gRPC documentation].
+//
+// Only the codes defined as consts in this package are valid codes. Do not use
+// other code values. Behavior of other codes is implementation-specific and
+// interoperability between implementations is not guaranteed.
+//
+// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
type Code uint32
const (
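The reworded Code documentation warns against casting arbitrary integers to Code. In practice that means constructing and inspecting statuses through the status package rather than handling raw code values; a small sketch:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Use only the predefined constants; status.Code extracts the code from
	// any error produced by the status package (a nil error maps to OK).
	err := status.Error(codes.NotFound, "no such user")
	fmt.Println(status.Code(err) == codes.NotFound) // true
	fmt.Println(status.Code(nil) == codes.OK)       // true
}
```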
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
index 877b7cd21..5dafd34ed 100644
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -44,10 +44,25 @@ func (t TLSInfo) AuthType() string {
return "tls"
}
+// cipherSuiteLookup returns the string version of a TLS cipher suite ID.
+func cipherSuiteLookup(cipherSuiteID uint16) string {
+ for _, s := range tls.CipherSuites() {
+ if s.ID == cipherSuiteID {
+ return s.Name
+ }
+ }
+ for _, s := range tls.InsecureCipherSuites() {
+ if s.ID == cipherSuiteID {
+ return s.Name
+ }
+ }
+ return fmt.Sprintf("unknown ID: %v", cipherSuiteID)
+}
+
// GetSecurityValue returns security info requested by channelz.
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
v := &TLSChannelzSecurityValue{
- StandardName: cipherSuiteLookup[t.State.CipherSuite],
+ StandardName: cipherSuiteLookup(t.State.CipherSuite),
}
// Currently there's no way to get LocalCertificate info from tls package.
if len(t.State.PeerCertificates) > 0 {
@@ -138,10 +153,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
return nil
}
+// The following cipher suites are forbidden for use with HTTP/2 by
+// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
+var tls12ForbiddenCipherSuites = map[uint16]struct{}{
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA: {},
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA: {},
+ tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {},
+ tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {},
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {},
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {},
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {},
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {},
+}
+
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
+ // If the user did not configure a MinVersion and did not configure a
+ // MaxVersion < 1.2, use MinVersion=1.2, which is required by
+ // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
+ if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) {
+ tc.config.MinVersion = tls.VersionTLS12
+ }
+ // If the user did not configure CipherSuites, use all "secure" cipher
+ // suites reported by the TLS package, but remove some explicitly forbidden
+ // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
+ if tc.config.CipherSuites == nil {
+ for _, cs := range tls.CipherSuites() {
+ if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
+ tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID)
+ }
+ }
+ }
return tc
}
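NewTLS now hardens zero-value configs: it raises MinVersion to TLS 1.2 (required for HTTP/2 by RFC 7540 section 9.2) and drops the appendix-A forbidden suites, but only when the user left those fields unset. Explicit settings still win, as this hedged sketch shows:

```go
package main

import (
	"crypto/tls"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Setting MinVersion (or CipherSuites) explicitly opts out of the new
	// defaults; NewTLS leaves user-provided values untouched.
	creds := credentials.NewTLS(&tls.Config{
		MinVersion: tls.VersionTLS13, // stricter than the enforced 1.2 floor
	})
	_ = grpc.WithTransportCredentials(creds)
}
```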
@@ -205,32 +249,3 @@ type TLSChannelzSecurityValue struct {
LocalCertificate []byte
RemoteCertificate []byte
}
-
-var cipherSuiteLookup = map[uint16]string{
- tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
- tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
- tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
- tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
- tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
- tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256",
- tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384",
- tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256",
-}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index cfc9fd85e..ba2426180 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -46,6 +46,7 @@ func init() {
internal.WithBinaryLogger = withBinaryLogger
internal.JoinDialOptions = newJoinDialOption
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
+ internal.WithRecvBufferPool = withRecvBufferPool
}
// dialOptions configure a Dial call. dialOptions are set by the DialOption
@@ -63,7 +64,6 @@ type dialOptions struct {
block bool
returnLastError bool
timeout time.Duration
- scChan <-chan ServiceConfig
authority string
binaryLogger binarylog.Logger
copts transport.ConnectOptions
@@ -250,19 +250,6 @@ func WithDecompressor(dc Decompressor) DialOption {
})
}
-// WithServiceConfig returns a DialOption which has a channel to read the
-// service configuration.
-//
-// Deprecated: service config should be received through name resolver or via
-// WithDefaultServiceConfig, as specified at
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be
-// removed in a future 1.x release.
-func WithServiceConfig(c <-chan ServiceConfig) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.scChan = c
- })
-}
-
// WithConnectParams configures the ClientConn to use the provided ConnectParams
// for creating and maintaining connections to servers.
//
@@ -413,6 +400,17 @@ func WithTimeout(d time.Duration) DialOption {
// connections. If FailOnNonTempDialError() is set to true, and an error is
// returned by f, gRPC checks the error's Temporary() method to decide if it
// should try to reconnect to the network address.
+//
+// Note: All supported releases of Go (as of December 2023) override the OS
+// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+// with OS defaults for keepalive time and interval, use a net.Dialer that sets
+// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
+// option to true from the Control field. For a concrete example of how to do
+// this, see internal.NetDialerWithTCPKeepalive().
+//
+// For more information, please see [issue 23459] in the Go github repo.
+//
+// [issue 23459]: https://github.com/golang/go/issues/23459
func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.Dialer = f
@@ -487,7 +485,7 @@ func FailOnNonTempDialError(f bool) DialOption {
// the RPCs.
func WithUserAgent(s string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.copts.UserAgent = s
+ o.copts.UserAgent = s + " " + grpcUA
})
}
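WithUserAgent now appends the grpc-go component when the option is applied, replacing the concatenation that the clientconn.go hunk above removed from dial time. The observable result should be unchanged (hedged sketch; the exact grpc-go version suffix depends on the build):

```go
package example

import "google.golang.org/grpc"

// Requests carry a User-Agent like "myapp/1.2 grpc-go/<version>".
var uaOpt grpc.DialOption = grpc.WithUserAgent("myapp/1.2")
```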
@@ -637,14 +635,16 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption {
func defaultDialOptions() dialOptions {
return dialOptions{
- healthCheckFunc: internal.HealthCheckFunc,
copts: transport.ConnectOptions{
- WriteBufferSize: defaultWriteBufSize,
ReadBufferSize: defaultReadBufSize,
+ WriteBufferSize: defaultWriteBufSize,
UseProxy: true,
+ UserAgent: grpcUA,
},
- recvBufferPool: nopBufferPool{},
- idleTimeout: 30 * time.Minute,
+ bs: internalbackoff.DefaultExponential,
+ healthCheckFunc: internal.HealthCheckFunc,
+ idleTimeout: 30 * time.Minute,
+ recvBufferPool: nopBufferPool{},
}
}
@@ -705,11 +705,13 @@ func WithIdleTimeout(d time.Duration) DialOption {
// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
// cases, the shared buffer pool will be ignored.
//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
+// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
+// v1.60.0 or later.
func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
+ return withRecvBufferPool(bufferPool)
+}
+
+func withRecvBufferPool(bufferPool SharedBufferPool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.recvBufferPool = bufferPool
})
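WithRecvBufferPool is deprecated in favor of the experimental package, wired up through the internal.WithRecvBufferPool hook registered in init above. A hedged migration sketch; the experimental import path is taken from the deprecation note, and the pool constructor from the public grpc API:

```go
package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/experimental"
)

// Equivalent to the deprecated grpc.WithRecvBufferPool(...).
var poolOpt grpc.DialOption = experimental.WithRecvBufferPool(grpc.NewSharedBufferPool())
```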
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
index 4399c3df4..11f91668a 100644
--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -18,7 +18,10 @@
// Package buffer provides an implementation of an unbounded buffer.
package buffer
-import "sync"
+import (
+ "errors"
+ "sync"
+)
// Unbounded is an implementation of an unbounded buffer which does not use
// extra goroutines. This is typically used for passing updates from one entity
@@ -36,6 +39,7 @@ import "sync"
type Unbounded struct {
c chan any
closed bool
+ closing bool
mu sync.Mutex
backlog []any
}
@@ -45,32 +49,32 @@ func NewUnbounded() *Unbounded {
return &Unbounded{c: make(chan any, 1)}
}
+var errBufferClosed = errors.New("Put called on closed buffer.Unbounded")
+
// Put adds t to the unbounded buffer.
-func (b *Unbounded) Put(t any) {
+func (b *Unbounded) Put(t any) error {
b.mu.Lock()
defer b.mu.Unlock()
- if b.closed {
- return
+ if b.closing {
+ return errBufferClosed
}
if len(b.backlog) == 0 {
select {
case b.c <- t:
- return
+ return nil
default:
}
}
b.backlog = append(b.backlog, t)
+ return nil
}
-// Load sends the earliest buffered data, if any, onto the read channel
-// returned by Get(). Users are expected to call this every time they read a
+// Load sends the earliest buffered data, if any, onto the read channel returned
+// by Get(). Users are expected to call this every time they successfully read a
// value from the read channel.
func (b *Unbounded) Load() {
b.mu.Lock()
defer b.mu.Unlock()
- if b.closed {
- return
- }
if len(b.backlog) > 0 {
select {
case b.c <- b.backlog[0]:
@@ -78,6 +82,8 @@ func (b *Unbounded) Load() {
b.backlog = b.backlog[1:]
default:
}
+ } else if b.closing && !b.closed {
+ close(b.c)
}
}
@@ -88,18 +94,23 @@ func (b *Unbounded) Load() {
// send the next buffered value onto the channel if there is any.
//
// If the unbounded buffer is closed, the read channel returned by this method
-// is closed.
+// is closed after all data is drained.
func (b *Unbounded) Get() <-chan any {
return b.c
}
-// Close closes the unbounded buffer.
+// Close closes the unbounded buffer. No subsequent data may be Put(), and the
+// channel returned from Get() will be closed after all the data is read and
+// Load() is called for the final time.
func (b *Unbounded) Close() {
b.mu.Lock()
defer b.mu.Unlock()
- if b.closed {
+ if b.closing {
return
}
- b.closed = true
- close(b.c)
+ b.closing = true
+ if len(b.backlog) == 0 {
+ b.closed = true
+ close(b.c)
+ }
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index 5395e7752..fc094f344 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -31,6 +31,7 @@ import (
"time"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
)
const (
@@ -58,6 +59,12 @@ func TurnOn() {
}
}
+func init() {
+ internal.ChannelzTurnOffForTesting = func() {
+ atomic.StoreInt32(&curState, 0)
+ }
+}
+
// IsOn returns whether channelz data collection is on.
func IsOn() bool {
return atomic.LoadInt32(&curState) == 1
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 3cf10ddfb..685a3cb41 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -36,9 +36,6 @@ var (
// "GRPC_RING_HASH_CAP". This does not override the default bounds
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
- // PickFirstLBConfig is set if we should support configuration of the
- // pick_first LB policy.
- PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true)
// LeastRequestLB is set if we should support the least_request_experimental
// LB policy, which can be enabled by setting the environment variable
// "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 02b4b6a1c..29f234acb 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -50,46 +50,7 @@ var (
//
// When both bootstrap FileName and FileContent are set, FileName is used.
XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
- // XDSRingHash indicates whether ring hash support is enabled, which can be
- // disabled by setting the environment variable
- // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
- XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
- // XDSClientSideSecurity is used to control processing of security
- // configuration on the client-side.
- //
- // Note that there is no env var protection for the server-side because we
- // have a brand new API on the server-side and users explicitly need to use
- // the new API to get security integration on the server.
- XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
- // XDSAggregateAndDNS indicates whether processing of aggregated cluster and
- // DNS cluster is enabled, which can be disabled by setting the environment
- // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
- // to "false".
- XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
-
- // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
- // which can be disabled by setting the environment variable
- // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
- XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
- // XDSOutlierDetection indicates whether outlier detection support is
- // enabled, which can be disabled by setting the environment variable
- // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
- XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
- // XDSFederation indicates whether federation support is enabled, which can
- // be enabled by setting the environment variable
- // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
- XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)
-
- // XDSRLS indicates whether processing of Cluster Specifier plugins and
- // support for the RLS CLuster Specifier is enabled, which can be disabled by
- // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
- // "false".
- XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
- // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
- // can be disabled by setting the environment variable
- // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
- XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true)
)
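
The variables removed above were plain environment-variable kill switches; the features they guarded are now enabled unconditionally. For illustration, a toggle helper in the spirit of the boolFromEnv calls seen in these hunks might look like the sketch below (the upstream helper body is not shown in this diff, so treat the details as assumptions):

```go
package envconfig

import (
	"os"
	"strings"
)

// boolFromEnv reads a boolean feature toggle from the environment. With a
// default of true, anything other than an explicit "false" keeps the feature
// on; with a default of false, only an explicit "true" turns it on.
func boolFromEnv(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	return strings.EqualFold(os.Getenv(envVar), "true")
}
```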
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
new file mode 100644
index 000000000..7f7044e17
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+var (
+ // WithRecvBufferPool is implemented by the grpc package and returns a dial
+ // option to configure a shared buffer pool for a grpc.ClientConn.
+ WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
+
+ // RecvBufferPool is implemented by the grpc package and returns a server
+ // option to configure a shared buffer pool for a grpc.Server.
+ RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+)
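
These hooks exist so internal packages can reach experimental APIs implemented in the top-level grpc package without an import cycle. From the application side, the corresponding public options would plausibly be wired up as in this sketch (option availability depends on the grpc version in use; the target address is made up):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// grpc.WithRecvBufferPool is the public (experimental) dial option that
	// these internal hooks back onto.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```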
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index 900917dbe..f7f40a16a 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -20,7 +20,6 @@ package grpcsync
import (
"context"
- "sync"
"google.golang.org/grpc/internal/buffer"
)
@@ -38,8 +37,6 @@ type CallbackSerializer struct {
done chan struct{}
callbacks *buffer.Unbounded
- closedMu sync.Mutex
- closed bool
}
// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
@@ -65,56 +62,34 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
// callbacks to be executed by the serializer. It is not possible to add
// callbacks once the context passed to NewCallbackSerializer is cancelled.
func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
- cs.closedMu.Lock()
- defer cs.closedMu.Unlock()
-
- if cs.closed {
- return false
- }
- cs.callbacks.Put(f)
- return true
+ return cs.callbacks.Put(f) == nil
}
func (cs *CallbackSerializer) run(ctx context.Context) {
- var backlog []func(context.Context)
-
defer close(cs.done)
+
+ // TODO: when Go 1.21 is the oldest supported version, this loop and Close
+ // can be replaced with:
+ //
+ // context.AfterFunc(ctx, cs.callbacks.Close)
for ctx.Err() == nil {
select {
case <-ctx.Done():
// Do nothing here. Next iteration of the for loop will not happen,
// since ctx.Err() would be non-nil.
- case callback, ok := <-cs.callbacks.Get():
- if !ok {
- return
- }
+ case cb := <-cs.callbacks.Get():
cs.callbacks.Load()
- callback.(func(ctx context.Context))(ctx)
+ cb.(func(context.Context))(ctx)
}
}
- // Fetch pending callbacks if any, and execute them before returning from
- // this method and closing cs.done.
- cs.closedMu.Lock()
- cs.closed = true
- backlog = cs.fetchPendingCallbacks()
+ // Close the buffer to prevent new callbacks from being added.
cs.callbacks.Close()
- cs.closedMu.Unlock()
- for _, b := range backlog {
- b(ctx)
- }
-}
-func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
- var backlog []func(context.Context)
- for {
- select {
- case b := <-cs.callbacks.Get():
- backlog = append(backlog, b.(func(context.Context)))
- cs.callbacks.Load()
- default:
- return backlog
- }
+ // Run all pending callbacks.
+ for cb := range cs.callbacks.Get() {
+ cs.callbacks.Load()
+ cb.(func(context.Context))(ctx)
}
}
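
The simplification works because buffer.Unbounded's Put now reports whether the buffer has been closed, so the serializer no longer needs its own closed flag and mutex. Usage is unchanged; a minimal sketch, illustrative only since grpcsync is internal to grpc-go:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/internal/grpcsync"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Callbacks run one at a time, in scheduling order.
	for i := 0; i < 3; i++ {
		i := i
		cs.Schedule(func(ctx context.Context) {
			fmt.Println("callback", i)
		})
	}

	time.Sleep(100 * time.Millisecond)
	cancel() // Pending callbacks drain; afterwards Schedule returns false.
}
```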
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
index 6c272476e..fe49cb74c 100644
--- a/vendor/google.golang.org/grpc/internal/idle/idle.go
+++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -26,8 +26,6 @@ import (
"sync"
"sync/atomic"
"time"
-
- "google.golang.org/grpc/grpclog"
)
// For overriding in unit tests.
@@ -39,27 +37,12 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
// and exit from idle mode.
type Enforcer interface {
ExitIdleMode() error
- EnterIdleMode() error
-}
-
-// Manager defines the functionality required to track RPC activity on a
-// channel.
-type Manager interface {
- OnCallBegin() error
- OnCallEnd()
- Close()
+ EnterIdleMode()
}
-type noopManager struct{}
-
-func (noopManager) OnCallBegin() error { return nil }
-func (noopManager) OnCallEnd() {}
-func (noopManager) Close() {}
-
-// manager implements the Manager interface. It uses atomic operations to
-// synchronize access to shared state and a mutex to guarantee mutual exclusion
-// in a critical section.
-type manager struct {
+// Manager implements idleness detection and calls the configured Enforcer to
+// enter/exit idle mode when appropriate. Must be created by NewManager.
+type Manager struct {
// State accessed atomically.
lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
@@ -69,8 +52,7 @@ type manager struct {
// Can be accessed without atomics or mutex since these are set at creation
// time and read-only after that.
enforcer Enforcer // Functionality provided by grpc.ClientConn.
- timeout int64 // Idle timeout duration nanos stored as an int64.
- logger grpclog.LoggerV2
+ timeout time.Duration
// idleMu is used to guarantee mutual exclusion in two scenarios:
// - Opposing intentions:
@@ -88,57 +70,48 @@ type manager struct {
timer *time.Timer
}
-// ManagerOptions is a collection of options used by
-// NewManager.
-type ManagerOptions struct {
- Enforcer Enforcer
- Timeout time.Duration
- Logger grpclog.LoggerV2
+// NewManager creates a new idleness manager implementation for the
+// given idle timeout. It begins in idle mode.
+func NewManager(enforcer Enforcer, timeout time.Duration) *Manager {
+ return &Manager{
+ enforcer: enforcer,
+ timeout: timeout,
+ actuallyIdle: true,
+ activeCallsCount: -math.MaxInt32,
+ }
}
-// NewManager creates a new idleness manager implementation for the
-// given idle timeout.
-func NewManager(opts ManagerOptions) Manager {
- if opts.Timeout == 0 {
- return noopManager{}
+// resetIdleTimerLocked resets the idle timer to the given duration. Called
+// when exiting idle mode or when the timer fires and we need to reset it.
+func (m *Manager) resetIdleTimerLocked(d time.Duration) {
+ if m.isClosed() || m.timeout == 0 || m.actuallyIdle {
+ return
}
- m := &manager{
- enforcer: opts.Enforcer,
- timeout: int64(opts.Timeout),
- logger: opts.Logger,
+ // Stop any existing timer and create a fresh one via timeAfterFunc, which
+ // (unlike timer.Reset) keeps timer creation overridable from unit tests.
+ if m.timer != nil {
+ m.timer.Stop()
}
- m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout)
- return m
+ m.timer = timeAfterFunc(d, m.handleIdleTimeout)
}
-// resetIdleTimer resets the idle timer to the given duration. This method
-// should only be called from the timer callback.
-func (m *manager) resetIdleTimer(d time.Duration) {
+func (m *Manager) resetIdleTimer(d time.Duration) {
m.idleMu.Lock()
defer m.idleMu.Unlock()
-
- if m.timer == nil {
- // Only close sets timer to nil. We are done.
- return
- }
-
- // It is safe to ignore the return value from Reset() because this method is
- // only ever called from the timer callback, which means the timer has
- // already fired.
- m.timer.Reset(d)
+ m.resetIdleTimerLocked(d)
}
// handleIdleTimeout is the timer callback that is invoked upon expiry of the
// configured idle timeout. The channel is considered inactive if there are no
// ongoing calls and no RPC activity since the last time the timer fired.
-func (m *manager) handleIdleTimeout() {
+func (m *Manager) handleIdleTimeout() {
if m.isClosed() {
return
}
if atomic.LoadInt32(&m.activeCallsCount) > 0 {
- m.resetIdleTimer(time.Duration(m.timeout))
+ m.resetIdleTimer(m.timeout)
return
}
@@ -148,24 +121,12 @@ func (m *manager) handleIdleTimeout() {
// Set the timer to fire after a duration of idle timeout, calculated
// from the time the most recent RPC completed.
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
- m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano()))
+ m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout)
return
}
- // This CAS operation is extremely likely to succeed given that there has
- // been no activity since the last time we were here. Setting the
- // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the
- // channel is either in idle mode or is trying to get there.
- if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
- // This CAS operation can fail if an RPC started after we checked for
- // activity at the top of this method, or one was ongoing from before
- // the last time we were here. In both case, reset the timer and return.
- m.resetIdleTimer(time.Duration(m.timeout))
- return
- }
-
- // Now that we've set the active calls count to -math.MaxInt32, it's time to
- // actually move to idle mode.
+ // Now that we've checked that there has been no activity, attempt to enter
+ // idle mode, which is very likely to succeed.
if m.tryEnterIdleMode() {
// Successfully entered idle mode. No timer needed until we exit idle.
return
@@ -174,8 +135,7 @@ func (m *manager) handleIdleTimeout() {
// Failed to enter idle mode due to a concurrent RPC that kept the channel
// active, or because of an error from the channel. Undo the attempt to
// enter idle, and reset the timer to try again later.
- atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
- m.resetIdleTimer(time.Duration(m.timeout))
+ m.resetIdleTimer(m.timeout)
}
// tryEnterIdleMode instructs the channel to enter idle mode. But before
@@ -185,36 +145,49 @@ func (m *manager) handleIdleTimeout() {
// Return value indicates whether or not the channel moved to idle mode.
//
// Holds idleMu which ensures mutual exclusion with exitIdleMode.
-func (m *manager) tryEnterIdleMode() bool {
+func (m *Manager) tryEnterIdleMode() bool {
+ // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin()
+ // that the channel is either in idle mode or is trying to get there.
+ if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
+ // This CAS operation can fail if an RPC started after we checked for
+ // activity in the timer handler, or one was ongoing from before the
+ // last time the timer fired, or if a test is attempting to enter idle
+ // mode without checking. In all cases, abort going into idle mode.
+ return false
+ }
+ // N.B. if we fail to enter idle mode after this, we must re-add
+ // math.MaxInt32 to m.activeCallsCount.
+
m.idleMu.Lock()
defer m.idleMu.Unlock()
if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
// We raced and lost to a new RPC. Very rare, but stop entering idle.
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
return false
}
if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
- // An very short RPC could have come in (and also finished) after we
+ // A very short RPC could have come in (and also finished) after we
// checked for calls count and activity in handleIdleTimeout(), but
// before the CAS operation. So, we need to check for activity again.
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
return false
}
- // No new RPCs have come in since we last set the active calls count value
- // -math.MaxInt32 in the timer callback. And since we have the lock, it is
- // safe to enter idle mode now.
- if err := m.enforcer.EnterIdleMode(); err != nil {
- m.logger.Errorf("Failed to enter idle mode: %v", err)
- return false
- }
-
- // Successfully entered idle mode.
+ // No new RPCs have come in since we set the active calls count value to
+ // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
+ // unconditionally now.
+ m.enforcer.EnterIdleMode()
m.actuallyIdle = true
return true
}
+func (m *Manager) EnterIdleModeForTesting() {
+ m.tryEnterIdleMode()
+}
+
// OnCallBegin is invoked at the start of every RPC.
-func (m *manager) OnCallBegin() error {
+func (m *Manager) OnCallBegin() error {
if m.isClosed() {
return nil
}
@@ -227,7 +200,7 @@ func (m *manager) OnCallBegin() error {
// Channel is either in idle mode or is in the process of moving to idle
// mode. Attempt to exit idle mode to allow this RPC.
- if err := m.exitIdleMode(); err != nil {
+ if err := m.ExitIdleMode(); err != nil {
// Undo the increment to calls count, and return an error causing the
// RPC to fail.
atomic.AddInt32(&m.activeCallsCount, -1)
@@ -238,28 +211,30 @@ func (m *manager) OnCallBegin() error {
return nil
}
-// exitIdleMode instructs the channel to exit idle mode.
-//
-// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
-func (m *manager) exitIdleMode() error {
+// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
+// internal state.
+func (m *Manager) ExitIdleMode() error {
+ // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
m.idleMu.Lock()
defer m.idleMu.Unlock()
- if !m.actuallyIdle {
- // This can happen in two scenarios:
+ if m.isClosed() || !m.actuallyIdle {
+ // This can happen in three scenarios:
// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
// tryEnterIdleMode(). But before the latter could grab the lock, an RPC
// came in and OnCallBegin() noticed that the calls count is negative.
// - Channel is in idle mode, and multiple new RPCs come in at the same
// time, all of them notice a negative calls count in OnCallBegin and get
// here. The first one to get the lock would get the channel to exit idle.
+ // - Channel is not in idle mode, and the user calls Connect which calls
+ // m.ExitIdleMode.
//
- // Either way, nothing to do here.
+ // In any case, there is nothing to do here.
return nil
}
if err := m.enforcer.ExitIdleMode(); err != nil {
- return fmt.Errorf("channel failed to exit idle mode: %v", err)
+ return fmt.Errorf("failed to exit idle mode: %w", err)
}
// Undo the idle entry process. This also respects any new RPC attempts.
@@ -267,12 +242,12 @@ func (m *manager) exitIdleMode() error {
m.actuallyIdle = false
// Start a new timer to fire after the configured idle timeout.
- m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout)
+ m.resetIdleTimerLocked(m.timeout)
return nil
}
// OnCallEnd is invoked at the end of every RPC.
-func (m *manager) OnCallEnd() {
+func (m *Manager) OnCallEnd() {
if m.isClosed() {
return
}
@@ -287,15 +262,17 @@ func (m *manager) OnCallEnd() {
atomic.AddInt32(&m.activeCallsCount, -1)
}
-func (m *manager) isClosed() bool {
+func (m *Manager) isClosed() bool {
return atomic.LoadInt32(&m.closed) == 1
}
-func (m *manager) Close() {
+func (m *Manager) Close() {
atomic.StoreInt32(&m.closed, 1)
m.idleMu.Lock()
- m.timer.Stop()
- m.timer = nil
+ if m.timer != nil {
+ m.timer.Stop()
+ m.timer = nil
+ }
m.idleMu.Unlock()
}
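
Taken together, the hunks above collapse the old Manager interface, noopManager, and ManagerOptions into a single concrete Manager driven by a two-method Enforcer. A sketch of how the pieces fit, using only the signatures visible in this diff (illustrative only; the idle package is internal to grpc-go):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/idle"
)

// loggingEnforcer satisfies idle.Enforcer as declared above: ExitIdleMode()
// returns an error, while EnterIdleMode() no longer does after this change.
type loggingEnforcer struct{}

func (loggingEnforcer) ExitIdleMode() error { fmt.Println("exit idle"); return nil }
func (loggingEnforcer) EnterIdleMode()      { fmt.Println("enter idle") }

func main() {
	// The manager starts in idle mode; the first RPC exits idle, and the
	// timer re-enters it after 30s of inactivity.
	m := idle.NewManager(loggingEnforcer{}, 30*time.Second)

	if err := m.OnCallBegin(); err == nil {
		// ... issue the RPC ...
		m.OnCallEnd()
	}
	m.Close()
}
```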
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 0d94c63e0..6c7ea6a53 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -57,7 +57,7 @@ var (
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
// stored in the passed in attributes. This is set by
// credentials/xds/xds.go.
- GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo
+ GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer
// GetServerCredentials returns the transport credentials configured on a
// gRPC server. An xDS-enabled server needs to know what type of credentials
// is configured on the underlying gRPC server. This is set by server.go.
@@ -68,11 +68,11 @@ var (
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
CanonicalString any // func (codes.Code) string
- // DrainServerTransports initiates a graceful close of existing connections
- // on a gRPC server accepted on the provided listener address. An
- // xDS-enabled server invokes this method on a grpc.Server when a particular
- // listener moves to "not-serving" mode.
- DrainServerTransports any // func(*grpc.Server, string)
+ // IsRegisteredMethod returns whether the passed in method is registered as
+ // a method on the server.
+ IsRegisteredMethod any // func(*grpc.Server, string) bool
+ // ServerFromContext returns the server from the context.
+ ServerFromContext any // func(context.Context) *grpc.Server
// AddGlobalServerOptions adds an array of ServerOption that will be
// effective globally for newly created servers. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
@@ -177,10 +177,25 @@ var (
GRPCResolverSchemeExtraMetadata string = "xds"
// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
- EnterIdleModeForTesting any // func(*grpc.ClientConn) error
+ EnterIdleModeForTesting any // func(*grpc.ClientConn)
// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
ExitIdleModeForTesting any // func(*grpc.ClientConn) error
+
+ ChannelzTurnOffForTesting func()
+
+ // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found
+ // error for a given resource type and name. This is usually triggered when
+ // the associated watch timer fires. For testing purposes, having this
+ // function makes events more predictable than relying on timer events.
+ TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error
+
+ // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client singleton
+ // to invoke resource not found for a resource type name and resource name.
+ TriggerXDSResourceNameNotFoundClient any // func(string, string) error
+
+ // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD.
+ FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index 99e1e5b36..b66dcb213 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -23,7 +23,6 @@ package dns
import (
"context"
"encoding/json"
- "errors"
"fmt"
"net"
"os"
@@ -37,6 +36,7 @@ import (
"google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/grpcrand"
+ "google.golang.org/grpc/internal/resolver/dns/internal"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
@@ -47,15 +47,11 @@ var EnableSRVLookups = false
var logger = grpclog.Component("dns")
-// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
-// single variable for testing the resolver?
-var (
- newTimer = time.NewTimer
- newTimerDNSResRate = time.NewTimer
-)
-
func init() {
resolver.Register(NewBuilder())
+ internal.TimeAfterFunc = time.After
+ internal.NewNetResolver = newNetResolver
+ internal.AddressDialer = addressDialer
}
const (
@@ -70,23 +66,6 @@ const (
txtAttribute = "grpc_config="
)
-var (
- errMissingAddr = errors.New("dns resolver: missing address")
-
- // Addresses ending with a colon that is supposed to be the separator
- // between host and port is not allowed. E.g. "::" is a valid address as
- // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with
- // a colon as the host and port separator
- errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
-)
-
-var (
- defaultResolver netResolver = net.DefaultResolver
- // To prevent excessive re-resolution, we enforce a rate limit on DNS
- // resolution requests.
- minDNSResRate = 30 * time.Second
-)
-
var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
return func(ctx context.Context, network, _ string) (net.Conn, error) {
var dialer net.Dialer
@@ -94,7 +73,11 @@ var addressDialer = func(address string) func(context.Context, string, string) (
}
}
-var newNetResolver = func(authority string) (netResolver, error) {
+var newNetResolver = func(authority string) (internal.NetResolver, error) {
+ if authority == "" {
+ return net.DefaultResolver, nil
+ }
+
host, port, err := parseTarget(authority, defaultDNSSvrPort)
if err != nil {
return nil, err
@@ -104,7 +87,7 @@ var newNetResolver = func(authority string) (netResolver, error) {
return &net.Resolver{
PreferGo: true,
- Dial: addressDialer(authorityWithPort),
+ Dial: internal.AddressDialer(authorityWithPort),
}, nil
}
@@ -142,13 +125,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
disableServiceConfig: opts.DisableServiceConfig,
}
- if target.URL.Host == "" {
- d.resolver = defaultResolver
- } else {
- d.resolver, err = newNetResolver(target.URL.Host)
- if err != nil {
- return nil, err
- }
+ d.resolver, err = internal.NewNetResolver(target.URL.Host)
+ if err != nil {
+ return nil, err
}
d.wg.Add(1)
@@ -161,12 +140,6 @@ func (b *dnsBuilder) Scheme() string {
return "dns"
}
-type netResolver interface {
- LookupHost(ctx context.Context, host string) (addrs []string, err error)
- LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
- LookupTXT(ctx context.Context, name string) (txts []string, err error)
-}
-
// deadResolver is a resolver that does nothing.
type deadResolver struct{}
@@ -178,7 +151,7 @@ func (deadResolver) Close() {}
type dnsResolver struct {
host string
port string
- resolver netResolver
+ resolver internal.NetResolver
ctx context.Context
cancel context.CancelFunc
cc resolver.ClientConn
@@ -223,29 +196,27 @@ func (d *dnsResolver) watcher() {
err = d.cc.UpdateState(*state)
}
- var timer *time.Timer
+ var waitTime time.Duration
if err == nil {
// Success resolving, wait for the next ResolveNow. However, also wait 30
// seconds at the very least to prevent constantly re-resolving.
backoffIndex = 1
- timer = newTimerDNSResRate(minDNSResRate)
+ waitTime = internal.MinResolutionRate
select {
case <-d.ctx.Done():
- timer.Stop()
return
case <-d.rn:
}
} else {
// Poll on an error found in DNS Resolver or an error received from
// ClientConn.
- timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
+ waitTime = backoff.DefaultExponential.Backoff(backoffIndex)
backoffIndex++
}
select {
case <-d.ctx.Done():
- timer.Stop()
return
- case <-timer.C:
+ case <-internal.TimeAfterFunc(waitTime):
}
}
}
@@ -387,7 +358,7 @@ func formatIP(addr string) (addrIP string, ok bool) {
// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
func parseTarget(target, defaultPort string) (host, port string, err error) {
if target == "" {
- return "", "", errMissingAddr
+ return "", "", internal.ErrMissingAddr
}
if ip := net.ParseIP(target); ip != nil {
// target is an IPv4 or IPv6(without brackets) address
@@ -397,7 +368,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
if port == "" {
// If the port field is empty (target ends with colon), e.g. "[::1]:",
// this is an error.
- return "", "", errEndsWithColon
+ return "", "", internal.ErrEndsWithColon
}
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
if host == "" {
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
new file mode 100644
index 000000000..c7fc557d0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the dns resolver package.
+package internal
+
+import (
+ "context"
+ "errors"
+ "net"
+ "time"
+)
+
+// NetResolver groups the methods on net.Resolver that are used by the DNS
+// resolver implementation. This allows the default net.Resolver instance to be
+// overridden from tests.
+type NetResolver interface {
+ LookupHost(ctx context.Context, host string) (addrs []string, err error)
+ LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+ LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+var (
+ // ErrMissingAddr is the error returned when building a DNS resolver if the
+ // provided target name is empty.
+ ErrMissingAddr = errors.New("dns resolver: missing address")
+
+ // ErrEndsWithColon is the error returned when building a DNS resolver if
+ // the provided target name ends with a colon that is supposed to be the
+ // separator between host and port. E.g. "::" is a valid address as it is
+ // an IPv6 address (host only) and "[::]:" is invalid as it ends with a
+ // colon as the host and port separator.
+ ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
+)
+
+// The following vars are overridden from tests.
+var (
+ // MinResolutionRate is the minimum interval that must elapse between
+ // re-resolutions. This helps to prevent excessive re-resolution.
+ MinResolutionRate = 30 * time.Second
+
+ // TimeAfterFunc is used by the DNS resolver to wait for the given duration
+ // to elapse. In non-test code, this is implemented by time.After. In test
+ // code, this can be used to control the amount of time the resolver is
+ // blocked waiting for the duration to elapse.
+ TimeAfterFunc func(time.Duration) <-chan time.Time
+
+ // NewNetResolver returns the net.Resolver instance for the given target.
+ NewNetResolver func(string) (NetResolver, error)
+
+ // AddressDialer is the dialer used to dial the DNS server. It accepts the
+ // Host portion of the URL corresponding to the user's dial target and
+ // returns a dial function.
+ AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error)
+)
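
The main consumer of these hooks is the resolver watcher loop shown above, which now waits on internal.TimeAfterFunc instead of owning timers. A test could stub the hook so waits return immediately; a hedged sketch (stubTimeAfter is a hypothetical helper, and this is only possible inside grpc-go itself since the package is internal):

```go
package dns_test

import (
	"time"

	"google.golang.org/grpc/internal/resolver/dns/internal"
)

// stubTimeAfter makes the resolver's backoff and min-rate waits fire
// immediately, and returns a func that restores the original hook.
func stubTimeAfter() (restore func()) {
	orig := internal.TimeAfterFunc
	internal.TimeAfterFunc = func(time.Duration) <-chan time.Time {
		ch := make(chan time.Time, 1)
		ch <- time.Now() // fire without waiting
		return ch
	}
	return func() { internal.TimeAfterFunc = orig }
}
```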
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
index 160911687..27cd81af9 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -61,6 +61,10 @@ func (b *builder) Scheme() string {
return b.scheme
}
+func (b *builder) OverrideAuthority(resolver.Target) string {
+ return "localhost"
+}
+
type nopResolver struct {
}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
new file mode 100644
index 000000000..4f347edd4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
@@ -0,0 +1,29 @@
+//go:build !unix && !windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "net"
+)
+
+// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on platforms other than unix and windows.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+ return &net.Dialer{}
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
new file mode 100644
index 000000000..078137b7f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
@@ -0,0 +1,54 @@
+//go:build unix
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+ return &net.Dialer{
+ // Setting a negative value here prevents the Go stdlib from overriding
+ // the values of TCP keepalive time and interval. It also prevents the
+ // Go stdlib from enabling TCP keepalives by default.
+ KeepAlive: time.Duration(-1),
+ // This method is called after the underlying network socket is created,
+ // but before dialing the socket (or calling its connect() method). The
+ // combination of unconditionally enabling TCP keepalives here, and
+ // disabling the overriding of TCP keepalive parameters by setting the
+ // KeepAlive field to a negative value above, results in OS defaults for
+ // the TCP keepalive interval and time parameters.
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
new file mode 100644
index 000000000..fd7d43a89
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -0,0 +1,54 @@
+//go:build windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+ return &net.Dialer{
+ // Setting a negative value here prevents the Go stdlib from overriding
+ // the values of TCP keepalive time and interval. It also prevents the
+ // Go stdlib from enabling TCP keepalives by default.
+ KeepAlive: time.Duration(-1),
+ // This method is called after the underlying network socket is created,
+ // but before dialing the socket (or calling its connect() method). The
+ // combination of unconditionally enabling TCP keepalives here, and
+ // disabling the overriding of TCP keepalive parameters by setting the
+ // KeepAlive field to a negative value above, results in OS defaults for
+ // the TCP keepalive interval and time parameters.
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
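
All three build-tagged files expose the same NetDialerWithTCPKeepalive constructor, which the client transport and proxy dialer switch to in later hunks. Standalone, its use is just that of a net.Dialer; a small sketch (illustrative, as the internal package is not importable from outside grpc-go, and the address is made up):

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc/internal"
)

func main() {
	// On unix/windows the returned dialer enables SO_KEEPALIVE with OS
	// defaults; elsewhere it is a plain net.Dialer.
	d := internal.NetDialerWithTCPKeepalive()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	conn, err := d.DialContext(ctx, "tcp", "example.com:443")
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```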
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index b330ccedc..83c382982 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -535,8 +535,8 @@ const minBatchSize = 1000
// size is too low to give stream goroutines a chance to fill it up.
//
// Upon exiting, if the error causing the exit is not an I/O error, run()
-// flushes and closes the underlying connection. Otherwise, the connection is
-// left open to allow the I/O error to be encountered by the reader instead.
+// flushes the underlying connection. The connection is always left open to
+// allow different closing behavior on the client and server.
func (l *loopyWriter) run() (err error) {
defer func() {
if l.logger.V(logLevel) {
@@ -544,7 +544,6 @@ func (l *loopyWriter) run() (err error) {
}
if !isIOError(err) {
l.framer.writer.Flush()
- l.conn.Close()
}
l.cbuf.finish()
}()
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 17f7a21b5..a9d70e2a1 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -75,11 +75,25 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
return nil, errors.New(msg)
}
+ var localAddr net.Addr
+ if la := r.Context().Value(http.LocalAddrContextKey); la != nil {
+ localAddr, _ = la.(net.Addr)
+ }
+ var authInfo credentials.AuthInfo
+ if r.TLS != nil {
+ authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
+ }
+ p := peer.Peer{
+ Addr: strAddr(r.RemoteAddr),
+ LocalAddr: localAddr,
+ AuthInfo: authInfo,
+ }
st := &serverHandlerTransport{
rw: w,
req: r,
closedCh: make(chan struct{}),
writes: make(chan func()),
+ peer: p,
contentType: contentType,
contentSubtype: contentSubtype,
stats: stats,
@@ -134,6 +148,8 @@ type serverHandlerTransport struct {
headerMD metadata.MD
+ peer peer.Peer
+
closeOnce sync.Once
closedCh chan struct{} // closed on Close
@@ -165,7 +181,13 @@ func (ht *serverHandlerTransport) Close(err error) {
})
}
-func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
+func (ht *serverHandlerTransport) Peer() *peer.Peer {
+ return &peer.Peer{
+ Addr: ht.peer.Addr,
+ LocalAddr: ht.peer.LocalAddr,
+ AuthInfo: ht.peer.AuthInfo,
+ }
+}
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
// the empty string if unknown.
@@ -347,10 +369,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
return err
}
-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
+func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) {
// With this transport type there will be exactly 1 stream: this HTTP request.
-
- ctx := ht.req.Context()
var cancel context.CancelFunc
if ht.timeoutSet {
ctx, cancel = context.WithTimeout(ctx, ht.timeout)
@@ -370,34 +390,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
ht.Close(errors.New("request is done processing"))
}()
+ ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
req := ht.req
-
s := &Stream{
- id: 0, // irrelevant
- requestRead: func(int) {},
- cancel: cancel,
- buf: newRecvBuffer(),
- st: ht,
- method: req.URL.Path,
- recvCompress: req.Header.Get("grpc-encoding"),
- contentSubtype: ht.contentSubtype,
- }
- pr := &peer.Peer{
- Addr: ht.RemoteAddr(),
- }
- if req.TLS != nil {
- pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
- }
- ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
- s.ctx = peer.NewContext(ctx, pr)
- for _, sh := range ht.stats {
- s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
- inHeader := &stats.InHeader{
- FullMethod: s.method,
- RemoteAddr: ht.RemoteAddr(),
- Compression: s.recvCompress,
- }
- sh.HandleRPC(s.ctx, inHeader)
+ id: 0, // irrelevant
+ ctx: ctx,
+ requestRead: func(int) {},
+ cancel: cancel,
+ buf: newRecvBuffer(),
+ st: ht,
+ method: req.URL.Path,
+ recvCompress: req.Header.Get("grpc-encoding"),
+ contentSubtype: ht.contentSubtype,
+ headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
}
s.trReader = &transportReader{
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index d6f5c4935..eff879964 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -36,6 +36,7 @@ import (
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
icredentials "google.golang.org/grpc/internal/credentials"
"google.golang.org/grpc/internal/grpclog"
@@ -43,7 +44,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
istatus "google.golang.org/grpc/internal/status"
- "google.golang.org/grpc/internal/syscall"
+ isyscall "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
@@ -58,6 +59,8 @@ import (
// atomically.
var clientConnectionCounter uint64
+var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
+
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
@@ -176,7 +179,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
if networkType == "tcp" && useProxy {
return proxyDial(ctx, address, grpcUA)
}
- return (&net.Dialer{}).DialContext(ctx, networkType, address)
+ return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address)
}
func isTemporary(err error) bool {
@@ -262,7 +265,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
keepaliveEnabled := false
if kp.Time != infinity {
- if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+ if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
}
keepaliveEnabled = true
@@ -448,7 +451,13 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
go func() {
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
- t.loopy.run()
+ if err := t.loopy.run(); !isIOError(err) {
+ // Immediately close the connection, as the loopy writer returns
+ // when there are no more active streams and we were draining (the
+ // server sent a GOAWAY). For I/O errors, the reader will hit it
+ // after draining any remaining incoming data.
+ t.conn.Close()
+ }
close(t.writerDone)
}()
return t, nil
@@ -493,8 +502,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
func (t *http2Client) getPeer() *peer.Peer {
return &peer.Peer{
- Addr: t.remoteAddr,
- AuthInfo: t.authInfo, // Can be nil
+ Addr: t.remoteAddr,
+ AuthInfo: t.authInfo, // Can be nil
+ LocalAddr: t.localAddr,
}
}
@@ -566,7 +576,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
}
- if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+ if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
var k string
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
@@ -1321,10 +1331,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
for streamID, stream := range t.activeStreams {
if streamID > id && streamID <= upperLimit {
// The stream was unprocessed by the server.
- if streamID > id && streamID <= upperLimit {
- atomic.StoreUint32(&stream.unprocessed, 1)
- streamsToClose = append(streamsToClose, stream)
- }
+ atomic.StoreUint32(&stream.unprocessed, 1)
+ streamsToClose = append(streamsToClose, stream)
}
}
t.mu.Unlock()
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 6fa1eb419..a206e2eef 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -68,18 +68,15 @@ var serverConnectionCounter uint64
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
- lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
- ctx context.Context
- done chan struct{}
- conn net.Conn
- loopy *loopyWriter
- readerDone chan struct{} // sync point to enable testing.
- writerDone chan struct{} // sync point to enable testing.
- remoteAddr net.Addr
- localAddr net.Addr
- authInfo credentials.AuthInfo // auth info about the connection
- inTapHandle tap.ServerInHandle
- framer *framer
+ lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
+ done chan struct{}
+ conn net.Conn
+ loopy *loopyWriter
+ readerDone chan struct{} // sync point to enable testing.
+ loopyWriterDone chan struct{}
+ peer peer.Peer
+ inTapHandle tap.ServerInHandle
+ framer *framer
// The max number of concurrent streams.
maxStreams uint32
// controlBuf delivers all the control related tasks (e.g., window
@@ -243,16 +240,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
}
done := make(chan struct{})
+ peer := peer.Peer{
+ Addr: conn.RemoteAddr(),
+ LocalAddr: conn.LocalAddr(),
+ AuthInfo: authInfo,
+ }
t := &http2Server{
- ctx: setConnection(context.Background(), rawConn),
done: done,
conn: conn,
- remoteAddr: conn.RemoteAddr(),
- localAddr: conn.LocalAddr(),
- authInfo: authInfo,
+ peer: peer,
framer: framer,
readerDone: make(chan struct{}),
- writerDone: make(chan struct{}),
+ loopyWriterDone: make(chan struct{}),
maxStreams: config.MaxStreams,
inTapHandle: config.InTapHandle,
fc: &trInFlow{limit: uint32(icwz)},
@@ -267,8 +266,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
bufferPool: newBufferPool(),
}
t.logger = prefixLoggerForServerTransport(t)
- // Add peer information to the http2server context.
- t.ctx = peer.NewContext(t.ctx, t.getPeer())
t.controlBuf = newControlBuffer(t.done)
if dynamicWindow {
@@ -277,15 +274,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
updateFlowControl: t.updateFlowControl,
}
}
- for _, sh := range t.stats {
- t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- })
- connBegin := &stats.ConnBegin{}
- sh.HandleConn(t.ctx, connBegin)
- }
- t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
+ t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr))
if err != nil {
return nil, err
}
@@ -333,8 +322,24 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
go func() {
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
- t.loopy.run()
- close(t.writerDone)
+ err := t.loopy.run()
+ close(t.loopyWriterDone)
+ if !isIOError(err) {
+ // Close the connection if a non-I/O error occurs (for I/O errors
+ // the reader will also encounter the error and close). Wait 1
+ // second before closing the connection, or when the reader is done
+ // (i.e. the client already closed the connection or a connection
+ // error occurred). This avoids the potential problem where there
+ // is unread data on the receive side of the connection, which, if
+ // closed, would lead to a TCP RST instead of FIN, and the client
+ // encountering errors. For more info:
+ // https://github.com/grpc/grpc-go/issues/5358
+ select {
+ case <-t.readerDone:
+ case <-time.After(time.Second):
+ }
+ t.conn.Close()
+ }
}()
go t.keepalive()
return t, nil
@@ -342,7 +347,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
// operateHeaders takes action on the decoded headers. Returns an error if fatal
// error encountered and transport needs to close, otherwise returns nil.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
+func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
// Acquire max stream ID lock for entire duration
t.maxStreamMu.Lock()
defer t.maxStreamMu.Unlock()
@@ -369,10 +374,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
buf := newRecvBuffer()
s := &Stream{
- id: streamID,
- st: t,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
+ id: streamID,
+ st: t,
+ buf: buf,
+ fc: &inFlow{limit: uint32(t.initialWindowSize)},
+ headerWireLength: int(frame.Header().Length),
}
var (
// if false, content-type was missing or invalid
@@ -511,9 +517,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.state = streamReadDone
}
if timeoutSet {
- s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout)
+ s.ctx, s.cancel = context.WithTimeout(ctx, timeout)
} else {
- s.ctx, s.cancel = context.WithCancel(t.ctx)
+ s.ctx, s.cancel = context.WithCancel(ctx)
}
// Attach the received metadata to the context.
@@ -592,18 +598,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
- for _, sh := range t.stats {
- s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
- inHeader := &stats.InHeader{
- FullMethod: s.method,
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- Compression: s.recvCompress,
- WireLength: int(frame.Header().Length),
- Header: mdata.Copy(),
- }
- sh.HandleRPC(s.ctx, inHeader)
- }
s.ctxDone = s.ctx.Done()
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
@@ -629,8 +623,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
-func (t *http2Server) HandleStreams(handle func(*Stream)) {
- defer close(t.readerDone)
+func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
+ defer func() {
+ close(t.readerDone)
+ <-t.loopyWriterDone
+ }()
for {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame()
@@ -664,7 +661,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) {
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
- if err := t.operateHeaders(frame, handle); err != nil {
+ if err := t.operateHeaders(ctx, frame, handle); err != nil {
t.Close(err)
break
}
@@ -979,7 +976,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
}
}
if err := t.writeHeaderLocked(s); err != nil {
- return status.Convert(err).Err()
+ switch e := err.(type) {
+ case ConnectionError:
+ return status.Error(codes.Unavailable, e.Desc)
+ default:
+ return status.Convert(err).Err()
+ }
}
return nil
}
@@ -1242,10 +1244,6 @@ func (t *http2Server) Close(err error) {
for _, s := range streams {
s.cancel()
}
- for _, sh := range t.stats {
- connEnd := &stats.ConnEnd{}
- sh.HandleConn(t.ctx, connEnd)
- }
}
// deleteStream deletes the stream s from transport's active streams.
@@ -1311,10 +1309,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo
})
}
-func (t *http2Server) RemoteAddr() net.Addr {
- return t.remoteAddr
-}
-
func (t *http2Server) Drain(debugData string) {
t.mu.Lock()
defer t.mu.Unlock()
@@ -1351,6 +1345,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
return false, err
}
+ t.framer.writer.Flush()
if retErr != nil {
return false, retErr
}
@@ -1371,7 +1366,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
return false, err
}
go func() {
- timer := time.NewTimer(time.Minute)
+ timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case <-t.drainEvent.Done():
@@ -1397,11 +1392,11 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
LocalFlowControlWindow: int64(t.fc.getSize()),
SocketOptions: channelz.GetSocketOption(t.conn),
- LocalAddr: t.localAddr,
- RemoteAddr: t.remoteAddr,
+ LocalAddr: t.peer.LocalAddr,
+ RemoteAddr: t.peer.Addr,
// RemoteName :
}
- if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
+ if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok {
s.Security = au.GetSecurityValue()
}
s.RemoteFlowControlWindow = t.getOutFlowWindow()
@@ -1433,10 +1428,12 @@ func (t *http2Server) getOutFlowWindow() int64 {
}
}
-func (t *http2Server) getPeer() *peer.Peer {
+// Peer returns the peer of the transport.
+func (t *http2Server) Peer() *peer.Peer {
return &peer.Peer{
- Addr: t.remoteAddr,
- AuthInfo: t.authInfo, // Can be nil
+ Addr: t.peer.Addr,
+ LocalAddr: t.peer.LocalAddr,
+ AuthInfo: t.peer.AuthInfo, // Can be nil
}
}
@@ -1461,6 +1458,6 @@ func GetConnection(ctx context.Context) net.Conn {
// SetConnection adds the connection to the context to be able to get
// information about the destination ip and port for an incoming RPC. This also
// allows any unary or streaming interceptors to see the connection.
-func setConnection(ctx context.Context, conn net.Conn) context.Context {
+func SetConnection(ctx context.Context, conn net.Conn) context.Context {
return context.WithValue(ctx, connectionKey{}, conn)
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
index 415961987..24fa10325 100644
--- a/vendor/google.golang.org/grpc/internal/transport/proxy.go
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -28,6 +28,8 @@ import (
"net/http"
"net/http/httputil"
"net/url"
+
+ "google.golang.org/grpc/internal"
)
const proxyAuthHeaderKey = "Proxy-Authorization"
@@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
// is necessary, dials, does the HTTP CONNECT handshake, and returns the
// connection.
-func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
+func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) {
newAddr := addr
proxyURL, err := mapAddress(addr)
if err != nil {
@@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn,
newAddr = proxyURL.Host
}
- conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr)
+ conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr)
if err != nil {
- return
+ return nil, err
}
- if proxyURL != nil {
+ if proxyURL == nil {
// proxy is disabled if proxyURL is nil.
- conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
+ return conn, err
}
- return
+ return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index aac056e72..b7b8fec18 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -37,6 +37,7 @@ import (
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
@@ -265,7 +266,8 @@ type Stream struct {
// headerValid indicates whether a valid header was received. Only
// meaningful after headerChan is closed (always call waitOnHeader() before
// reading its value). Not valid on server side.
- headerValid bool
+ headerValid bool
+ headerWireLength int // Only set on server side.
// hdrMu protects header and trailer metadata on the server-side.
hdrMu sync.Mutex
@@ -425,6 +427,12 @@ func (s *Stream) Context() context.Context {
return s.ctx
}
+// SetContext sets the context of the stream. This will be deleted once the
+// stats handler callouts all move to gRPC layer.
+func (s *Stream) SetContext(ctx context.Context) {
+ s.ctx = ctx
+}
+
// Method returns the method for the stream.
func (s *Stream) Method() string {
return s.method
@@ -437,6 +445,12 @@ func (s *Stream) Status() *status.Status {
return s.status
}
+// HeaderWireLength returns the size of the headers of the stream as received
+// from the wire. Valid only on the server.
+func (s *Stream) HeaderWireLength() int {
+ return s.headerWireLength
+}
+
// SetHeader sets the header metadata. This can be called multiple times.
// Server side only.
// This should not be called in parallel to other data writes.
@@ -698,7 +712,7 @@ type ClientTransport interface {
// Write methods for a given Stream will be called serially.
type ServerTransport interface {
// HandleStreams receives incoming streams using the given handler.
- HandleStreams(func(*Stream))
+ HandleStreams(context.Context, func(*Stream))
// WriteHeader sends the header metadata for the given stream.
// WriteHeader may not be called on all streams.
@@ -717,8 +731,8 @@ type ServerTransport interface {
// handlers will be terminated asynchronously.
Close(err error)
- // RemoteAddr returns the remote network address.
- RemoteAddr() net.Addr
+ // Peer returns the peer of the server transport.
+ Peer() *peer.Peer
// Drain notifies the client this ServerTransport stops accepting new RPCs.
Drain(debugData string)
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index a2cdcaf12..1e9485fd6 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -25,8 +25,14 @@ import (
"context"
"fmt"
"strings"
+
+ "google.golang.org/grpc/internal"
)
+func init() {
+ internal.FromOutgoingContextRaw = fromOutgoingContextRaw
+}
+
// DecodeKeyValue returns k, v, nil.
//
// Deprecated: use k and v directly instead.
@@ -153,14 +159,16 @@ func Join(mds ...MD) MD {
type mdIncomingKey struct{}
type mdOutgoingKey struct{}
-// NewIncomingContext creates a new context with incoming md attached.
+// NewIncomingContext creates a new context with incoming md attached. md must
+// not be modified after calling this function.
func NewIncomingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdIncomingKey{}, md)
}
// NewOutgoingContext creates a new context with outgoing md attached. If used
// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
-// overwrite any previously-appended metadata.
+// overwrite any previously-appended metadata. md must not be modified after
+// calling this function.
func NewOutgoingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
}
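The "md must not be modified" contract added above is load-bearing: gRPC keeps a reference to md rather than copying it defensively. A minimal sketch of the safe pattern (illustrative only, not part of this patch):

package main

import (
	"context"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("k", "v")
	ctx := metadata.NewOutgoingContext(context.Background(), md)

	// md is now owned by the context; mutate a copy instead.
	md2 := md.Copy()
	md2.Append("k", "v2")
	_ = ctx
}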
@@ -203,7 +211,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
}
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
-// key from the incoming metadata if it exists. Key must be lower-case.
+// key from the incoming metadata if it exists. Keys are matched in a case-insensitive
+// manner.
//
// # Experimental
//
@@ -219,33 +228,29 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
return copyOf(v)
}
for k, v := range md {
- // We need to manually convert all keys to lower case, because MD is a
- // map, and there's no guarantee that the MD attached to the context is
- // created using our helper functions.
- if strings.ToLower(k) == key {
+ // Case-insensitive comparison: MD is a map, and there's no guarantee
+ // that the MD attached to the context is created using our helper
+ // functions.
+ if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
}
-// the returned slice must not be modified in place
func copyOf(v []string) []string {
vals := make([]string, len(v))
copy(vals, v)
return vals
}
-// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
+// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
//
// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
// is a map, there's no guarantee it's created using our helper functions) and
// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
// lowercase).
-//
-// This is intended for gRPC-internal use ONLY. Users should use
-// FromOutgoingContext instead.
-func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
if !ok {
return nil, nil, false
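For context on the EqualFold change above: lookups now match keys attached with arbitrary casing, without lowercasing every key on each pass. A small sketch using only the public metadata package:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// MD built by hand, bypassing the helpers that lowercase keys.
	md := metadata.MD{"X-Request-Id": {"abc123"}}
	ctx := metadata.NewIncomingContext(context.Background(), md)

	// Matches the mixed-case key stored in the map.
	fmt.Println(metadata.ValueFromIncomingContext(ctx, "x-request-id"))
}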
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
index e01d219ff..a821ff9b2 100644
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -32,6 +32,8 @@ import (
type Peer struct {
// Addr is the peer address.
Addr net.Addr
+ // LocalAddr is the local address.
+ LocalAddr net.Addr
// AuthInfo is the authentication information of the transport.
// It is nil if there is no transport security being used.
AuthInfo credentials.AuthInfo
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 236837f41..bf56faa76 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -37,7 +37,6 @@ import (
type pickerWrapper struct {
mu sync.Mutex
done bool
- idle bool
blockingCh chan struct{}
picker balancer.Picker
statsHandlers []stats.Handler // to record blocking picker calls
@@ -53,11 +52,7 @@ func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
pw.mu.Lock()
- if pw.done || pw.idle {
- // There is a small window where a picker update from the LB policy can
- // race with the channel going to idle mode. If the picker is idle here,
- // it is because the channel asked it to do so, and therefore it is sage
- // to ignore the update from the LB policy.
+ if pw.done {
pw.mu.Unlock()
return
}
@@ -210,23 +205,15 @@ func (pw *pickerWrapper) close() {
close(pw.blockingCh)
}
-func (pw *pickerWrapper) enterIdleMode() {
- pw.mu.Lock()
- defer pw.mu.Unlock()
- if pw.done {
- return
- }
- pw.idle = true
-}
-
-func (pw *pickerWrapper) exitIdleMode() {
+// reset clears the pickerWrapper and prepares it for being used again when idle
+// mode is exited.
+func (pw *pickerWrapper) reset() {
pw.mu.Lock()
defer pw.mu.Unlock()
if pw.done {
return
}
pw.blockingCh = make(chan struct{})
- pw.idle = false
}
// dropError is a wrapper error that indicates the LB policy wishes to drop the
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index 2e9cf66b4..5128f9364 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -25,7 +25,6 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/pretty"
@@ -65,19 +64,6 @@ type pfConfig struct {
}
func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
- if !envconfig.PickFirstLBConfig {
- // Prior to supporting loadbalancing configuration, the pick_first LB
- // policy did not implement the balancer.ConfigParser interface. This
- // meant that if a non-empty configuration was passed to it, the service
- // config unmarshaling code would throw a warning log, but would
- // continue using the pick_first LB policy. The code below ensures the
- // same behavior is retained if the env var is not set.
- if string(js) != "{}" {
- logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js))
- }
- return nil, nil
- }
-
var cfg pfConfig
if err := json.Unmarshal(js, &cfg); err != nil {
return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
new file mode 100644
index 000000000..14aa6f20a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package dns implements a dns resolver to be installed as the default resolver
+// in grpc.
+//
+// Deprecated: this package is imported by grpc and should not need to be
+// imported directly by users.
+package dns
+
+import (
+ "google.golang.org/grpc/internal/resolver/dns"
+ "google.golang.org/grpc/resolver"
+)
+
+// NewBuilder creates a dnsBuilder, which is used to create DNS resolvers.
+//
+// Deprecated: import grpc and use resolver.Get("dns") instead.
+func NewBuilder() resolver.Builder {
+ return dns.NewBuilder()
+}
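As the deprecation notice says, this shim only forwards to the internal builder. A sketch of the supported lookup path (importing grpc registers the "dns" scheme):

package main

import (
	"fmt"

	_ "google.golang.org/grpc" // registers the default resolvers, including "dns"
	"google.golang.org/grpc/resolver"
)

func main() {
	b := resolver.Get("dns")
	fmt.Println(b != nil) // true once grpc is imported
}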
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
index 804be887d..ada5b9bb7 100644
--- a/vendor/google.golang.org/grpc/resolver/map.go
+++ b/vendor/google.golang.org/grpc/resolver/map.go
@@ -136,3 +136,116 @@ func (a *AddressMap) Values() []any {
}
return ret
}
+
+type endpointNode struct {
+ addrs map[string]struct{}
+}
+
+// Equal returns whether the unordered set of addrs are the same between the
+// endpoint nodes.
+func (en *endpointNode) Equal(en2 *endpointNode) bool {
+ if len(en.addrs) != len(en2.addrs) {
+ return false
+ }
+ for addr := range en.addrs {
+ if _, ok := en2.addrs[addr]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+func toEndpointNode(endpoint Endpoint) endpointNode {
+ en := make(map[string]struct{})
+ for _, addr := range endpoint.Addresses {
+ en[addr.Addr] = struct{}{}
+ }
+ return endpointNode{
+ addrs: en,
+ }
+}
+
+// EndpointMap is a map of endpoints to arbitrary values keyed on only the
+// unordered set of address strings within an endpoint. This map is not thread
+// safe, thus it is unsafe to access concurrently. Must be created via
+// NewEndpointMap; do not construct directly.
+type EndpointMap struct {
+ endpoints map[*endpointNode]any
+}
+
+// NewEndpointMap creates a new EndpointMap.
+func NewEndpointMap() *EndpointMap {
+ return &EndpointMap{
+ endpoints: make(map[*endpointNode]any),
+ }
+}
+
+// Get returns the value for the address in the map, if present.
+func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) {
+ en := toEndpointNode(e)
+ if endpoint := em.find(en); endpoint != nil {
+ return em.endpoints[endpoint], true
+ }
+ return nil, false
+}
+
+// Set updates or adds the value to the address in the map.
+func (em *EndpointMap) Set(e Endpoint, value any) {
+ en := toEndpointNode(e)
+ if endpoint := em.find(en); endpoint != nil {
+ em.endpoints[endpoint] = value
+ return
+ }
+ em.endpoints[&en] = value
+}
+
+// Len returns the number of entries in the map.
+func (em *EndpointMap) Len() int {
+ return len(em.endpoints)
+}
+
+// Keys returns a slice of all current map keys, as endpoints specifying the
+// addresses present in the endpoint keys, in which uniqueness is determined by
+// the unordered set of addresses. Thus, endpoint information returned is not
+// the full endpoint data (drops duplicated addresses and attributes) but can be
+// used for EndpointMap accesses.
+func (em *EndpointMap) Keys() []Endpoint {
+ ret := make([]Endpoint, 0, len(em.endpoints))
+ for en := range em.endpoints {
+ var endpoint Endpoint
+ for addr := range en.addrs {
+ endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr})
+ }
+ ret = append(ret, endpoint)
+ }
+ return ret
+}
+
+// Values returns a slice of all current map values.
+func (em *EndpointMap) Values() []any {
+ ret := make([]any, 0, len(em.endpoints))
+ for _, val := range em.endpoints {
+ ret = append(ret, val)
+ }
+ return ret
+}
+
+// find returns a pointer to the endpoint node in em if the endpoint node is
+// already present. If not found, nil is returned. The comparisons are done on
+// the unordered set of addresses within an endpoint.
+func (em EndpointMap) find(e endpointNode) *endpointNode {
+ for endpoint := range em.endpoints {
+ if e.Equal(endpoint) {
+ return endpoint
+ }
+ }
+ return nil
+}
+
+// Delete removes the specified endpoint from the map.
+func (em *EndpointMap) Delete(e Endpoint) {
+ en := toEndpointNode(e)
+ if entry := em.find(en); entry != nil {
+ delete(em.endpoints, entry)
+ }
+}
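A usage sketch for the new EndpointMap (addresses invented for illustration): keying is by the unordered set of address strings, so permuted endpoints hit the same entry.

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	em := resolver.NewEndpointMap()

	// Two endpoints carrying the same unordered set of addresses.
	e1 := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}}
	e2 := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "10.0.0.2:443"}, {Addr: "10.0.0.1:443"}}}

	em.Set(e1, "primary")
	v, ok := em.Get(e2) // same entry: address order is ignored
	fmt.Println(v, ok)  // primary true
}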
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index 11384e228..adf89dd9c 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -240,11 +240,6 @@ type ClientConn interface {
//
// Deprecated: Use UpdateState instead.
NewAddress(addresses []Address)
- // NewServiceConfig is called by resolver to notify ClientConn a new
- // service config. The service config should be provided as a json string.
- //
- // Deprecated: Use UpdateState instead.
- NewServiceConfig(serviceConfig string)
// ParseServiceConfig parses the provided service config and returns an
// object that provides the parsed config.
ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
@@ -286,6 +281,11 @@ func (t Target) Endpoint() string {
return strings.TrimPrefix(endpoint, "/")
}
+// String returns a string representation of Target.
+func (t Target) String() string {
+ return t.URL.String()
+}
+
// Builder creates a resolver that will be used to watch name resolution updates.
type Builder interface {
// Build creates a new resolver for the given target.
@@ -314,3 +314,13 @@ type Resolver interface {
// Close closes the resolver.
Close()
}
+
+// AuthorityOverrider is implemented by Builders that wish to override the
+// default authority for the ClientConn.
+// By default, the authority used is target.Endpoint().
+type AuthorityOverrider interface {
+ // OverrideAuthority returns the authority to use for a ClientConn with the
+ // given target. The implementation must generate it without blocking,
+ // typically in line, and must keep it unchanged.
+ OverrideAuthority(Target) string
+}
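A hedged sketch of a builder opting in to the new interface; the wrapper type and authority value below are invented for illustration:

package example

import "google.golang.org/grpc/resolver"

// staticAuthorityBuilder is hypothetical: it wraps another builder and pins
// the channel authority instead of deriving it from target.Endpoint().
type staticAuthorityBuilder struct {
	resolver.Builder
}

// OverrideAuthority must not block and must return a stable value.
func (staticAuthorityBuilder) OverrideAuthority(resolver.Target) string {
	return "internal.example.com"
}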
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
deleted file mode 100644
index d68330560..000000000
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "strings"
- "sync"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/internal/pretty"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
-)
-
-// resolverStateUpdater wraps the single method used by ccResolverWrapper to
-// report a state update from the actual resolver implementation.
-type resolverStateUpdater interface {
- updateResolverState(s resolver.State, err error) error
-}
-
-// ccResolverWrapper is a wrapper on top of cc for resolvers.
-// It implements resolver.ClientConn interface.
-type ccResolverWrapper struct {
- // The following fields are initialized when the wrapper is created and are
- // read-only afterwards, and therefore can be accessed without a mutex.
- cc resolverStateUpdater
- channelzID *channelz.Identifier
- ignoreServiceConfig bool
- opts ccResolverWrapperOpts
- serializer *grpcsync.CallbackSerializer // To serialize all incoming calls.
- serializerCancel context.CancelFunc // To close the serializer, accessed only from close().
-
- // All incoming (resolver --> gRPC) calls are guaranteed to execute in a
- // mutually exclusive manner as they are scheduled on the serializer.
- // Fields accessed *only* in these serializer callbacks, can therefore be
- // accessed without a mutex.
- curState resolver.State
-
- // mu guards access to the below fields.
- mu sync.Mutex
- closed bool
- resolver resolver.Resolver // Accessed only from outgoing calls.
-}
-
-// ccResolverWrapperOpts wraps the arguments to be passed when creating a new
-// ccResolverWrapper.
-type ccResolverWrapperOpts struct {
- target resolver.Target // User specified dial target to resolve.
- builder resolver.Builder // Resolver builder to use.
- bOpts resolver.BuildOptions // Resolver build options to use.
- channelzID *channelz.Identifier // Channelz identifier for the channel.
-}
-
-// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
-// returns a ccResolverWrapper object which wraps the newly built resolver.
-func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) {
- ctx, cancel := context.WithCancel(context.Background())
- ccr := &ccResolverWrapper{
- cc: cc,
- channelzID: opts.channelzID,
- ignoreServiceConfig: opts.bOpts.DisableServiceConfig,
- opts: opts,
- serializer: grpcsync.NewCallbackSerializer(ctx),
- serializerCancel: cancel,
- }
-
- // Cannot hold the lock at build time because the resolver can send an
- // update or error inline and these incoming calls grab the lock to schedule
- // a callback in the serializer.
- r, err := opts.builder.Build(opts.target, ccr, opts.bOpts)
- if err != nil {
- cancel()
- return nil, err
- }
-
- // Any error reported by the resolver at build time that leads to a
- // re-resolution request from the balancer is dropped by grpc until we
- // return from this function. So, we don't have to handle pending resolveNow
- // requests here.
- ccr.mu.Lock()
- ccr.resolver = r
- ccr.mu.Unlock()
-
- return ccr, nil
-}
-
-func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
- ccr.mu.Lock()
- defer ccr.mu.Unlock()
-
- // ccr.resolver field is set only after the call to Build() returns. But in
- // the process of building, the resolver may send an error update which when
- // propagated to the balancer may result in a re-resolution request.
- if ccr.closed || ccr.resolver == nil {
- return
- }
- ccr.resolver.ResolveNow(o)
-}
-
-func (ccr *ccResolverWrapper) close() {
- ccr.mu.Lock()
- if ccr.closed {
- ccr.mu.Unlock()
- return
- }
-
- channelz.Info(logger, ccr.channelzID, "Closing the name resolver")
-
- // Close the serializer to ensure that no more calls from the resolver are
- // handled, before actually closing the resolver.
- ccr.serializerCancel()
- ccr.closed = true
- r := ccr.resolver
- ccr.mu.Unlock()
-
- // Give enqueued callbacks a chance to finish.
- <-ccr.serializer.Done()
-
- // Spawn a goroutine to close the resolver (since it may block trying to
- // cleanup all allocated resources) and return early.
- go r.Close()
-}
-
-// serializerScheduleLocked is a convenience method to schedule a function to be
-// run on the serializer while holding ccr.mu.
-func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) {
- ccr.mu.Lock()
- ccr.serializer.Schedule(f)
- ccr.mu.Unlock()
-}
-
-// UpdateState is called by resolver implementations to report new state to gRPC
-// which includes addresses and service config.
-func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
- errCh := make(chan error, 1)
- if s.Endpoints == nil {
- s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
- for _, a := range s.Addresses {
- ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
- ep.Addresses[0].BalancerAttributes = nil
- s.Endpoints = append(s.Endpoints, ep)
- }
- }
- ok := ccr.serializer.Schedule(func(context.Context) {
- ccr.addChannelzTraceEvent(s)
- ccr.curState = s
- if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
- errCh <- balancer.ErrBadResolverState
- return
- }
- errCh <- nil
- })
- if !ok {
- // The only time when Schedule() fail to add the callback to the
- // serializer is when the serializer is closed, and this happens only
- // when the resolver wrapper is closed.
- return nil
- }
- return <-errCh
-}
-
-// ReportError is called by resolver implementations to report errors
-// encountered during name resolution to gRPC.
-func (ccr *ccResolverWrapper) ReportError(err error) {
- ccr.serializerScheduleLocked(func(_ context.Context) {
- channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
- ccr.cc.updateResolverState(resolver.State{}, err)
- })
-}
-
-// NewAddress is called by the resolver implementation to send addresses to
-// gRPC.
-func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
- ccr.serializerScheduleLocked(func(_ context.Context) {
- ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
- ccr.curState.Addresses = addrs
- ccr.cc.updateResolverState(ccr.curState, nil)
- })
-}
-
-// NewServiceConfig is called by the resolver implementation to send service
-// configs to gRPC.
-func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
- ccr.serializerScheduleLocked(func(_ context.Context) {
- channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc)
- if ccr.ignoreServiceConfig {
- channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config")
- return
- }
- scpr := parseServiceConfig(sc)
- if scpr.Err != nil {
- channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
- return
- }
- ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
- ccr.curState.ServiceConfig = scpr
- ccr.cc.updateResolverState(ccr.curState, nil)
- })
-}
-
-// ParseServiceConfig is called by resolver implementations to parse a JSON
-// representation of the service config.
-func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
- return parseServiceConfig(scJSON)
-}
-
-// addChannelzTraceEvent adds a channelz trace event containing the new
-// state received from resolver implementations.
-func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
- var updates []string
- var oldSC, newSC *ServiceConfig
- var oldOK, newOK bool
- if ccr.curState.ServiceConfig != nil {
- oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
- }
- if s.ServiceConfig != nil {
- newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
- }
- if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
- updates = append(updates, "service config updated")
- }
- if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
- updates = append(updates, "resolver returned an empty address list")
- } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
- updates = append(updates, "resolver returned new addresses")
- }
- channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
-}
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
new file mode 100644
index 000000000..c79bab121
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -0,0 +1,197 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "context"
+ "strings"
+ "sync"
+
+ "google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/internal/pretty"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+// ccResolverWrapper is a wrapper on top of cc for resolvers.
+// It implements resolver.ClientConn interface.
+type ccResolverWrapper struct {
+ // The following fields are initialized when the wrapper is created and are
+ // read-only afterwards, and therefore can be accessed without a mutex.
+ cc *ClientConn
+ ignoreServiceConfig bool
+ serializer *grpcsync.CallbackSerializer
+ serializerCancel context.CancelFunc
+
+ resolver resolver.Resolver // only accessed within the serializer
+
+ // The following fields are protected by mu. Caller must take cc.mu before
+ // taking mu.
+ mu sync.Mutex
+ curState resolver.State
+ closed bool
+}
+
+// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used
+// after calling start, which builds the resolver.
+func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
+ ctx, cancel := context.WithCancel(cc.ctx)
+ return &ccResolverWrapper{
+ cc: cc,
+ ignoreServiceConfig: cc.dopts.disableServiceConfig,
+ serializer: grpcsync.NewCallbackSerializer(ctx),
+ serializerCancel: cancel,
+ }
+}
+
+// start builds the name resolver using the resolver.Builder in cc and returns
+// any error encountered. It must always be the first operation performed on
+// any newly created ccResolverWrapper, except that close may be called instead.
+func (ccr *ccResolverWrapper) start() error {
+ errCh := make(chan error)
+ ccr.serializer.Schedule(func(ctx context.Context) {
+ if ctx.Err() != nil {
+ return
+ }
+ opts := resolver.BuildOptions{
+ DisableServiceConfig: ccr.cc.dopts.disableServiceConfig,
+ DialCreds: ccr.cc.dopts.copts.TransportCredentials,
+ CredsBundle: ccr.cc.dopts.copts.CredsBundle,
+ Dialer: ccr.cc.dopts.copts.Dialer,
+ }
+ var err error
+ ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts)
+ errCh <- err
+ })
+ return <-errCh
+}
+
+func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
+ ccr.serializer.Schedule(func(ctx context.Context) {
+ if ctx.Err() != nil || ccr.resolver == nil {
+ return
+ }
+ ccr.resolver.ResolveNow(o)
+ })
+}
+
+// close initiates async shutdown of the wrapper. To determine the wrapper has
+// finished shutting down, the caller should block on ccr.serializer.Done()
+// without cc.mu held.
+func (ccr *ccResolverWrapper) close() {
+ channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver")
+ ccr.mu.Lock()
+ ccr.closed = true
+ ccr.mu.Unlock()
+
+ ccr.serializer.Schedule(func(context.Context) {
+ if ccr.resolver == nil {
+ return
+ }
+ ccr.resolver.Close()
+ ccr.resolver = nil
+ })
+ ccr.serializerCancel()
+}
+
+// UpdateState is called by resolver implementations to report new state to gRPC
+// which includes addresses and service config.
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
+ ccr.cc.mu.Lock()
+ ccr.mu.Lock()
+ if ccr.closed {
+ ccr.mu.Unlock()
+ ccr.cc.mu.Unlock()
+ return nil
+ }
+ if s.Endpoints == nil {
+ s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
+ for _, a := range s.Addresses {
+ ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
+ ep.Addresses[0].BalancerAttributes = nil
+ s.Endpoints = append(s.Endpoints, ep)
+ }
+ }
+ ccr.addChannelzTraceEvent(s)
+ ccr.curState = s
+ ccr.mu.Unlock()
+ return ccr.cc.updateResolverStateAndUnlock(s, nil)
+}
+
+// ReportError is called by resolver implementations to report errors
+// encountered during name resolution to gRPC.
+func (ccr *ccResolverWrapper) ReportError(err error) {
+ ccr.cc.mu.Lock()
+ ccr.mu.Lock()
+ if ccr.closed {
+ ccr.mu.Unlock()
+ ccr.cc.mu.Unlock()
+ return
+ }
+ ccr.mu.Unlock()
+ channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
+ ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err)
+}
+
+// NewAddress is called by the resolver implementation to send addresses to
+// gRPC.
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
+ ccr.cc.mu.Lock()
+ ccr.mu.Lock()
+ if ccr.closed {
+ ccr.mu.Unlock()
+ ccr.cc.mu.Unlock()
+ return
+ }
+ s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}
+ ccr.addChannelzTraceEvent(s)
+ ccr.curState = s
+ ccr.mu.Unlock()
+ ccr.cc.updateResolverStateAndUnlock(s, nil)
+}
+
+// ParseServiceConfig is called by resolver implementations to parse a JSON
+// representation of the service config.
+func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
+ return parseServiceConfig(scJSON)
+}
+
+// addChannelzTraceEvent adds a channelz trace event containing the new
+// state received from resolver implementations.
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
+ var updates []string
+ var oldSC, newSC *ServiceConfig
+ var oldOK, newOK bool
+ if ccr.curState.ServiceConfig != nil {
+ oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
+ }
+ if s.ServiceConfig != nil {
+ newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
+ }
+ if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
+ updates = append(updates, "service config updated")
+ }
+ if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
+ updates = append(updates, "resolver returned an empty address list")
+ } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
+ updates = append(updates, "resolver returned new addresses")
+ }
+ channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
+}
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index b7723aa09..a4b6bc687 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -640,14 +640,18 @@ func encode(c baseCodec, msg any) ([]byte, error) {
return b, nil
}
-// compress returns the input bytes compressed by compressor or cp. If both
-// compressors are nil, returns nil.
+// compress returns the input bytes compressed by compressor or cp.
+// If both compressors are nil, or if the message has zero length, returns nil,
+// indicating no compression was done.
//
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
if compressor == nil && cp == nil {
return nil, nil
}
+ if len(in) == 0 {
+ return nil, nil
+ }
wrapErr := func(err error) error {
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
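A quick illustration (separate from the patch) of the overhead the new zero-length check avoids: gzip framing alone makes the "compressed" form of an empty message larger than the message itself.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write(nil) // zero-length payload
	zw.Close()
	fmt.Printf("%d bytes of framing for a 0-byte message\n", buf.Len())
}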
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 8f60d4214..e89c5ac61 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -70,9 +70,10 @@ func init() {
internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
return srv.opts.creds
}
- internal.DrainServerTransports = func(srv *Server, addr string) {
- srv.drainServerTransports(addr)
+ internal.IsRegisteredMethod = func(srv *Server, method string) bool {
+ return srv.isRegisteredMethod(method)
}
+ internal.ServerFromContext = serverFromContext
internal.AddGlobalServerOptions = func(opt ...ServerOption) {
globalServerOptions = append(globalServerOptions, opt...)
}
@@ -81,6 +82,7 @@ func init() {
}
internal.BinaryLogger = binaryLogger
internal.JoinServerOptions = newJoinServerOption
+ internal.RecvBufferPool = recvBufferPool
}
var statusOK = status.New(codes.OK, "")
@@ -134,12 +136,14 @@ type Server struct {
quit *grpcsync.Event
done *grpcsync.Event
channelzRemoveOnce sync.Once
- serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
+ serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
+ handlersWG sync.WaitGroup // counts active method handler goroutines
channelzID *channelz.Identifier
czData *channelzData
- serverWorkerChannel chan func()
+ serverWorkerChannel chan func()
+ serverWorkerChannelClose func()
}
type serverOptions struct {
@@ -170,6 +174,7 @@ type serverOptions struct {
headerTableSize *uint32
numServerWorkers uint32
recvBufferPool SharedBufferPool
+ waitForHandlers bool
}
var defaultServerOptions = serverOptions{
@@ -567,6 +572,21 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
})
}
+// WaitForHandlers causes Stop to wait until all outstanding method handlers have
+// exited before returning. If false, Stop will return as soon as all
+// connections have closed, but method handlers may still be running. By
+// default, Stop does not wait for method handlers to return.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WaitForHandlers(w bool) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.waitForHandlers = w
+ })
+}
+
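A minimal opt-in sketch for the option above (service registration elided; the API is experimental per the notice):

package main

import (
	"net"

	"google.golang.org/grpc"
)

func main() {
	// With WaitForHandlers(true), Stop also waits for running method
	// handlers to return, not just for connections to close.
	srv := grpc.NewServer(grpc.WaitForHandlers(true))

	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	go srv.Serve(lis)

	srv.Stop()
}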
// RecvBufferPool returns a ServerOption that configures the server
// to use the provided shared buffer pool for parsing incoming messages. Depending
// on the application's workload, this could result in reduced memory allocation.
@@ -578,11 +598,13 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
// options are used: StatsHandler, EnableTracing, or binary logging. In such
// cases, the shared buffer pool will be ignored.
//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
+// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
+// v1.60.0 or later.
func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
+ return recvBufferPool(bufferPool)
+}
+
+func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.recvBufferPool = bufferPool
})
@@ -616,15 +638,14 @@ func (s *Server) serverWorker() {
// connections to reduce the time spent overall on runtime.morestack.
func (s *Server) initServerWorkers() {
s.serverWorkerChannel = make(chan func())
+ s.serverWorkerChannelClose = grpcsync.OnceFunc(func() {
+ close(s.serverWorkerChannel)
+ })
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
go s.serverWorker()
}
}
-func (s *Server) stopServerWorkers() {
- close(s.serverWorkerChannel)
-}
-
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
@@ -806,6 +827,18 @@ func (l *listenSocket) Close() error {
// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
// this method returns.
// Serve will return a non-nil error unless Stop or GracefulStop is called.
+//
+// Note: All supported releases of Go (as of December 2023) override the OS
+// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+// with OS defaults for keepalive time and interval, callers need to do the
+// following two things:
+// - pass a net.Listener created by calling the Listen method on a
+// net.ListenConfig with the `KeepAlive` field set to a negative value. This
+// will result in the Go standard library not overriding OS defaults for TCP
+// keepalive interval and time. But this will also result in the Go standard
+// library not enabling TCP keepalives by default.
+// - override the Accept method on the passed in net.Listener and set the
+// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults.
func (s *Server) Serve(lis net.Listener) error {
s.mu.Lock()
s.printf("serving")
@@ -913,24 +946,21 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
return
}
+ if cc, ok := rawConn.(interface {
+ PassServerTransport(transport.ServerTransport)
+ }); ok {
+ cc.PassServerTransport(st)
+ }
+
if !s.addConn(lisAddr, st) {
return
}
go func() {
- s.serveStreams(st)
+ s.serveStreams(context.Background(), st, rawConn)
s.removeConn(lisAddr, st)
}()
}
-func (s *Server) drainServerTransports(addr string) {
- s.mu.Lock()
- conns := s.conns[addr]
- for st := range conns {
- st.Drain("")
- }
- s.mu.Unlock()
-}
-
// newHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go).
func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
@@ -971,18 +1001,31 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
return st
}
-func (s *Server) serveStreams(st transport.ServerTransport) {
- defer st.Close(errors.New("finished serving streams for the server transport"))
- var wg sync.WaitGroup
+func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
+ ctx = transport.SetConnection(ctx, rawConn)
+ ctx = peer.NewContext(ctx, st.Peer())
+ for _, sh := range s.opts.statsHandlers {
+ ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
+ RemoteAddr: st.Peer().Addr,
+ LocalAddr: st.Peer().LocalAddr,
+ })
+ sh.HandleConn(ctx, &stats.ConnBegin{})
+ }
- streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
- st.HandleStreams(func(stream *transport.Stream) {
- wg.Add(1)
+ defer func() {
+ st.Close(errors.New("finished serving streams for the server transport"))
+ for _, sh := range s.opts.statsHandlers {
+ sh.HandleConn(ctx, &stats.ConnEnd{})
+ }
+ }()
+ streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
+ st.HandleStreams(ctx, func(stream *transport.Stream) {
+ s.handlersWG.Add(1)
streamQuota.acquire()
f := func() {
defer streamQuota.release()
- defer wg.Done()
+ defer s.handlersWG.Done()
s.handleStream(st, stream)
}
@@ -996,7 +1039,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
}
go f()
})
- wg.Wait()
}
var _ http.Handler = (*Server)(nil)
@@ -1040,7 +1082,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
defer s.removeConn(listenerAddressForServeHTTP, st)
- s.serveStreams(st)
+ s.serveStreams(r.Context(), st, nil)
}
func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
@@ -1689,6 +1731,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
ctx := stream.Context()
+ ctx = contextWithServer(ctx, s)
var ti *traceInfo
if EnableTracing {
tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
@@ -1697,7 +1740,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
tr: tr,
firstLine: firstLine{
client: false,
- remoteAddr: t.RemoteAddr(),
+ remoteAddr: t.Peer().Addr,
},
}
if dl, ok := ctx.Deadline(); ok {
@@ -1731,6 +1774,22 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
service := sm[:pos]
method := sm[pos+1:]
+ md, _ := metadata.FromIncomingContext(ctx)
+ for _, sh := range s.opts.statsHandlers {
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+ sh.HandleRPC(ctx, &stats.InHeader{
+ FullMethod: stream.Method(),
+ RemoteAddr: t.Peer().Addr,
+ LocalAddr: t.Peer().LocalAddr,
+ Compression: stream.RecvCompress(),
+ WireLength: stream.HeaderWireLength(),
+ Header: md,
+ })
+ }
+ // Attach the updated context to the stream so stats handler callouts work.
+ // Will be deleted once all stats handler calls come from the gRPC layer.
+ stream.SetContext(ctx)
+
srv, knownService := s.services[service]
if knownService {
if md, ok := srv.methods[method]; ok {
@@ -1820,62 +1879,72 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream
// pending RPCs on the client side will get notified by connection
// errors.
func (s *Server) Stop() {
- s.quit.Fire()
+ s.stop(false)
+}
- defer func() {
- s.serveWG.Wait()
- s.done.Fire()
- }()
+// GracefulStop stops the gRPC server gracefully. It stops the server from
+// accepting new connections and RPCs and blocks until all the pending RPCs are
+// finished.
+func (s *Server) GracefulStop() {
+ s.stop(true)
+}
+
+func (s *Server) stop(graceful bool) {
+ s.quit.Fire()
+ defer s.done.Fire()
s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
s.mu.Lock()
- listeners := s.lis
- s.lis = nil
- conns := s.conns
- s.conns = nil
- // interrupt GracefulStop if Stop and GracefulStop are called concurrently.
- s.cv.Broadcast()
+ s.closeListenersLocked()
+ // Wait for serving threads to be ready to exit. Only then can we be sure no
+ // new conns will be created.
s.mu.Unlock()
+ s.serveWG.Wait()
- for lis := range listeners {
- lis.Close()
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if graceful {
+ s.drainAllServerTransportsLocked()
+ } else {
+ s.closeServerTransportsLocked()
}
- for _, cs := range conns {
- for st := range cs {
- st.Close(errors.New("Server.Stop called"))
- }
+
+ for len(s.conns) != 0 {
+ s.cv.Wait()
}
+ s.conns = nil
+
if s.opts.numServerWorkers > 0 {
- s.stopServerWorkers()
+ // Closing the channel (only once, via grpcsync.OnceFunc) after all the
+ // connections have been closed above ensures that there are no
+ // goroutines executing the callback passed to st.HandleStreams (where
+ // the channel is written to).
+ s.serverWorkerChannelClose()
+ }
+
+ if graceful || s.opts.waitForHandlers {
+ s.handlersWG.Wait()
}
- s.mu.Lock()
if s.events != nil {
s.events.Finish()
s.events = nil
}
- s.mu.Unlock()
}
-// GracefulStop stops the gRPC server gracefully. It stops the server from
-// accepting new connections and RPCs and blocks until all the pending RPCs are
-// finished.
-func (s *Server) GracefulStop() {
- s.quit.Fire()
- defer s.done.Fire()
-
- s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
- s.mu.Lock()
- if s.conns == nil {
- s.mu.Unlock()
- return
+// s.mu must be held by the caller.
+func (s *Server) closeServerTransportsLocked() {
+ for _, conns := range s.conns {
+ for st := range conns {
+ st.Close(errors.New("Server.Stop called"))
+ }
}
+}
- for lis := range s.lis {
- lis.Close()
- }
- s.lis = nil
+// s.mu must be held by the caller.
+func (s *Server) drainAllServerTransportsLocked() {
if !s.drain {
for _, conns := range s.conns {
for st := range conns {
@@ -1884,22 +1953,14 @@ func (s *Server) GracefulStop() {
}
s.drain = true
}
+}
- // Wait for serving threads to be ready to exit. Only then can we be sure no
- // new conns will be created.
- s.mu.Unlock()
- s.serveWG.Wait()
- s.mu.Lock()
-
- for len(s.conns) != 0 {
- s.cv.Wait()
- }
- s.conns = nil
- if s.events != nil {
- s.events.Finish()
- s.events = nil
+// s.mu must be held by the caller.
+func (s *Server) closeListenersLocked() {
+ for lis := range s.lis {
+ lis.Close()
}
- s.mu.Unlock()
+ s.lis = nil
}
// contentSubtype must be lowercase
@@ -1913,11 +1974,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
}
codec := encoding.GetCodec(contentSubtype)
if codec == nil {
+ logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
return encoding.GetCodec(proto.Name)
}
return codec
}
+type serverKey struct{}
+
+// serverFromContext gets the Server from the context.
+func serverFromContext(ctx context.Context) *Server {
+ s, _ := ctx.Value(serverKey{}).(*Server)
+ return s
+}
+
+// contextWithServer sets the Server in the context.
+func contextWithServer(ctx context.Context, server *Server) context.Context {
+ return context.WithValue(ctx, serverKey{}, server)
+}
+
+// isRegisteredMethod returns whether the passed in method is registered as a
+// method on the server. /service/method and service/method will match if the
+// service and method are registered on the server.
+func (s *Server) isRegisteredMethod(serviceMethod string) bool {
+ if serviceMethod != "" && serviceMethod[0] == '/' {
+ serviceMethod = serviceMethod[1:]
+ }
+ pos := strings.LastIndex(serviceMethod, "/")
+ if pos == -1 { // Invalid method name syntax.
+ return false
+ }
+ service := serviceMethod[:pos]
+ method := serviceMethod[pos+1:]
+ srv, knownService := s.services[service]
+ if knownService {
+ if _, ok := srv.methods[method]; ok {
+ return true
+ }
+ if _, ok := srv.streams[method]; ok {
+ return true
+ }
+ }
+ return false
+}
+
// SetHeader sets the header metadata to be sent from the server to the client.
// The context provided must be the context passed to the server's handler.
//
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index b14b2fbea..d621f52b1 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -48,6 +48,8 @@ import (
"google.golang.org/grpc/status"
)
+var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
+
// StreamHandler defines the handler called by gRPC server to complete the
// execution of a streaming RPC.
//
@@ -184,7 +186,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// when the RPC completes.
opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
- if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+ if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
// validate md
if err := imetadata.Validate(md); err != nil {
return nil, status.Error(codes.Internal, err.Error())
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 6d2cadd79..f1aec4c0a 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.59.0"
+const Version = "1.61.1"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index bb480f1f9..5da38a409 100644
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -35,7 +35,6 @@ if [[ "$1" = "-install" ]]; then
# Install the pinned versions as defined in module tools.
pushd ./test/tools
go install \
- golang.org/x/lint/golint \
golang.org/x/tools/cmd/goimports \
honnef.co/go/tools/cmd/staticcheck \
github.com/client9/misspell/cmd/misspell
@@ -77,15 +76,19 @@ fi
not grep 'func Test[^(]' *_test.go
not grep 'func Test[^(]' test/*.go
+# - Check for typos in test function names
+git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test'
+git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example'
+
# - Do not import x/net/context.
not git grep -l 'x/net/context' -- "*.go"
# - Do not import math/rand for real library code. Use internal/grpcrand for
# thread safety.
-git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
+git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test'
# - Do not use "interface{}"; use "any" instead.
-git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc'
+git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate'
# - Do not call grpclog directly. Use grpclog.Component instead.
git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
@@ -94,15 +97,14 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
# - Ensure all usages of grpc_testing package are renamed when importing.
-not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go"
+not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go"
# - Ensure all xds proto imports are renamed to *pb or *grpc.
git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
misspell -error .
-# - gofmt, goimports, golint (with exceptions for generated code), go vet,
-# go mod tidy.
+# - gofmt, goimports, go vet, go mod tidy.
# Perform these checks on each module inside gRPC.
for MOD_FILE in $(find . -name 'go.mod'); do
MOD_DIR=$(dirname ${MOD_FILE})
@@ -110,7 +112,6 @@ for MOD_FILE in $(find . -name 'go.mod'); do
go vet -all ./... | fail_on_output
gofmt -s -d -l . 2>&1 | fail_on_output
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
- golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"
go mod tidy -compat=1.19
git status --porcelain 2>&1 | fail_on_output || \
@@ -119,94 +120,71 @@ for MOD_FILE in $(find . -name 'go.mod'); do
done
# - Collection of static analysis checks
-#
-# TODO(dfawley): don't use deprecated functions in examples or first-party
-# plugins.
-# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs.
SC_OUT="$(mktemp)"
-staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true
-# Error if anything other than deprecation warnings are printed.
-not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
-# Only ignore the following deprecated types/fields/functions.
-not grep -Fv '.CredsBundle
-.HeaderMap
-.Metadata is deprecated: use Attributes
-.NewAddress
-.NewServiceConfig
-.Type is deprecated: use Attributes
-BuildVersion is deprecated
-balancer.ErrTransientFailure
-balancer.Picker
-extDesc.Filename is deprecated
-github.com/golang/protobuf/jsonpb is deprecated
-grpc.CallCustomCodec
-grpc.Code
-grpc.Compressor
-grpc.CustomCodec
-grpc.Decompressor
-grpc.MaxMsgSize
-grpc.MethodConfig
-grpc.NewGZIPCompressor
-grpc.NewGZIPDecompressor
-grpc.RPCCompressor
-grpc.RPCDecompressor
-grpc.ServiceConfig
-grpc.WithCompressor
-grpc.WithDecompressor
-grpc.WithDialer
-grpc.WithMaxMsgSize
-grpc.WithServiceConfig
-grpc.WithTimeout
-http.CloseNotifier
-info.SecurityVersion
-proto is deprecated
-proto.InternalMessageInfo is deprecated
-proto.EnumName is deprecated
-proto.ErrInternalBadWireType is deprecated
-proto.FileDescriptor is deprecated
-proto.Marshaler is deprecated
-proto.MessageType is deprecated
-proto.RegisterEnum is deprecated
-proto.RegisterFile is deprecated
-proto.RegisterType is deprecated
-proto.RegisterExtension is deprecated
-proto.RegisteredExtension is deprecated
-proto.RegisteredExtensions is deprecated
-proto.RegisterMapType is deprecated
-proto.Unmarshaler is deprecated
+staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true
+
+# Error for anything other than checks that need exclusions.
+grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)"
+
+# Exclude underscore checks for generated code.
+grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)'
+
+# Error for duplicate imports not including grpc protos.
+grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
+channelz/grpc_channelz_v1"
+go-control-plane/envoy
+grpclb/grpc_lb_v1"
+health/grpc_health_v1"
+interop/grpc_testing"
+orca/v3"
+proto/grpc_gcp"
+proto/grpc_lookup_v1"
+reflection/grpc_reflection_v1"
+reflection/grpc_reflection_v1alpha"
+XXXXX PleaseIgnoreUnused'
+
+# Error for any package comments not in generated code.
+grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:"
+
+# Only ignore the following deprecated types/fields/functions and exclude
+# generated code.
+grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
+XXXXX Protobuf related deprecation errors:
+"github.com/golang/protobuf
+.pb.go:
+grpc_testing_not_regenerate
+: ptypes.
+proto.RegisterType
+XXXXX gRPC internal usage deprecation errors:
+"google.golang.org/grpc
+: grpc.
+: v1alpha.
+: v1alphareflectionpb.
+BalancerAttributes is deprecated:
+CredsBundle is deprecated:
+Metadata is deprecated: use Attributes instead.
+NewSubConn is deprecated:
+OverrideServerName is deprecated:
+RemoveSubConn is deprecated:
+SecurityVersion is deprecated:
Target is deprecated: Use the Target field in the BuildOptions instead.
-xxx_messageInfo_
-' "${SC_OUT}"
-
-# - special golint on package comments.
-lint_package_comment_per_package() {
- # Number of files in this go package.
- fileCount=$(go list -f '{{len .GoFiles}}' $1)
- if [ ${fileCount} -eq 0 ]; then
- return 0
- fi
- # Number of package errors generated by golint.
- lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment")
- # golint complains about every file that's missing the package comment. If the
- # number of files for this package is greater than the number of errors, there's
- # at least one file with package comment, good. Otherwise, fail.
- if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then
- echo "Package $1 (with ${fileCount} files) is missing package comment"
- return 1
- fi
-}
-lint_package_comment() {
- set +ex
-
- count=0
- for i in $(go list ./...); do
- lint_package_comment_per_package "$i"
- ((count += $?))
- done
-
- set -ex
- return $count
-}
-lint_package_comment
+UpdateAddresses is deprecated:
+UpdateSubConnState is deprecated:
+balancer.ErrTransientFailure is deprecated:
+grpc/reflection/v1alpha/reflection.proto
+XXXXX xDS deprecated fields we support
+.ExactMatch
+.PrefixMatch
+.SafeRegexMatch
+.SuffixMatch
+GetContainsMatch
+GetExactMatch
+GetMatchSubjectAltNames
+GetPrefixMatch
+GetSafeRegexMatch
+GetSuffixMatch
+GetTlsCertificateCertificateProviderInstance
+GetValidationContextCertificateProviderInstance
+XXXXX PleaseIgnoreUnused'
echo SUCCESS
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b4326ecdb..761570532 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -367,8 +367,8 @@ github.com/gorilla/sessions
# github.com/gorilla/websocket v1.5.1
## explicit; go 1.20
github.com/gorilla/websocket
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0
-## explicit; go 1.17
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0
+## explicit; go 1.19
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
github.com/grpc-ecosystem/grpc-gateway/v2/runtime
github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@@ -545,7 +545,7 @@ github.com/pmezard/go-difflib/difflib
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.5.0
+# github.com/prometheus/client_model v0.6.0
## explicit; go 1.19
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.45.0
@@ -872,7 +872,7 @@ github.com/uptrace/bun/dialect/pgdialect
# github.com/uptrace/bun/dialect/sqlitedialect v1.1.17
## explicit; go 1.19
github.com/uptrace/bun/dialect/sqlitedialect
-# github.com/uptrace/bun/extra/bunotel v1.1.16
+# github.com/uptrace/bun/extra/bunotel v1.1.17
## explicit; go 1.19
github.com/uptrace/bun/extra/bunotel
# github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.3
@@ -913,7 +913,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw
go.mongodb.org/mongo-driver/bson/bsontype
go.mongodb.org/mongo-driver/bson/primitive
go.mongodb.org/mongo-driver/x/bsonx/bsoncore
-# go.opentelemetry.io/otel v1.20.0
+# go.opentelemetry.io/otel v1.24.0
## explicit; go 1.20
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@@ -931,34 +931,35 @@ go.opentelemetry.io/otel/semconv/v1.12.0
go.opentelemetry.io/otel/semconv/v1.20.0
go.opentelemetry.io/otel/semconv/v1.20.0/httpconv
go.opentelemetry.io/otel/semconv/v1.21.0
+go.opentelemetry.io/otel/semconv/v1.24.0
go.opentelemetry.io/otel/semconv/v1.7.0
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0
## explicit; go 1.20
go.opentelemetry.io/otel/exporters/otlp/otlptrace
go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0
## explicit; go 1.20
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.20.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0
## explicit; go 1.20
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
-# go.opentelemetry.io/otel/exporters/prometheus v0.43.0
+# go.opentelemetry.io/otel/exporters/prometheus v0.46.0
## explicit; go 1.20
go.opentelemetry.io/otel/exporters/prometheus
-# go.opentelemetry.io/otel/metric v1.20.0
+# go.opentelemetry.io/otel/metric v1.24.0
## explicit; go 1.20
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.20.0
+# go.opentelemetry.io/otel/sdk v1.24.0
## explicit; go 1.20
go.opentelemetry.io/otel/sdk
go.opentelemetry.io/otel/sdk/instrumentation
@@ -966,18 +967,20 @@ go.opentelemetry.io/otel/sdk/internal
go.opentelemetry.io/otel/sdk/internal/env
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
-# go.opentelemetry.io/otel/sdk/metric v1.20.0
+# go.opentelemetry.io/otel/sdk/metric v1.24.0
## explicit; go 1.20
go.opentelemetry.io/otel/sdk/metric
go.opentelemetry.io/otel/sdk/metric/internal
go.opentelemetry.io/otel/sdk/metric/internal/aggregate
+go.opentelemetry.io/otel/sdk/metric/internal/exemplar
+go.opentelemetry.io/otel/sdk/metric/internal/x
go.opentelemetry.io/otel/sdk/metric/metricdata
-# go.opentelemetry.io/otel/trace v1.20.0
+# go.opentelemetry.io/otel/trace v1.24.0
## explicit; go 1.20
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/noop
-# go.opentelemetry.io/proto/otlp v1.0.0
+# go.opentelemetry.io/proto/otlp v1.1.0
## explicit; go 1.17
go.opentelemetry.io/proto/otlp/collector/trace/v1
go.opentelemetry.io/proto/otlp/common/v1
@@ -1127,14 +1130,14 @@ google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
-# google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17
+# google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917
## explicit; go 1.19
google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.59.0
+# google.golang.org/grpc v1.61.1
## explicit; go 1.19
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -1172,6 +1175,7 @@ google.golang.org/grpc/internal/metadata
google.golang.org/grpc/internal/pretty
google.golang.org/grpc/internal/resolver
google.golang.org/grpc/internal/resolver/dns
+google.golang.org/grpc/internal/resolver/dns/internal
google.golang.org/grpc/internal/resolver/passthrough
google.golang.org/grpc/internal/resolver/unix
google.golang.org/grpc/internal/serviceconfig
@@ -1183,6 +1187,7 @@ google.golang.org/grpc/keepalive
google.golang.org/grpc/metadata
google.golang.org/grpc/peer
google.golang.org/grpc/resolver
+google.golang.org/grpc/resolver/dns
google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status