summaryrefslogtreecommitdiff
path: root/vendor/go.opentelemetry.io/otel
diff options
context:
space:
mode:
authorLibravatar Terin Stock <terinjokes@gmail.com>2025-03-09 17:47:56 +0100
committerLibravatar Terin Stock <terinjokes@gmail.com>2025-03-10 01:59:49 +0100
commit3ac1ee16f377d31a0fb80c8dae28b6239ac4229e (patch)
treef61faa581feaaeaba2542b9f2b8234a590684413 /vendor/go.opentelemetry.io/otel
parent[chore] update URLs to forked source (diff)
downloadgotosocial-3ac1ee16f377d31a0fb80c8dae28b6239ac4229e.tar.xz
[chore] remove vendor
Diffstat (limited to 'vendor/go.opentelemetry.io/otel')
-rw-r--r--vendor/go.opentelemetry.io/otel/.codespellignore9
-rw-r--r--vendor/go.opentelemetry.io/otel/.codespellrc10
-rw-r--r--vendor/go.opentelemetry.io/otel/.gitattributes3
-rw-r--r--vendor/go.opentelemetry.io/otel/.gitignore14
-rw-r--r--vendor/go.opentelemetry.io/otel/.golangci.yml325
-rw-r--r--vendor/go.opentelemetry.io/otel/.lycheeignore6
-rw-r--r--vendor/go.opentelemetry.io/otel/.markdownlint.yaml29
-rw-r--r--vendor/go.opentelemetry.io/otel/CHANGELOG.md3302
-rw-r--r--vendor/go.opentelemetry.io/otel/CODEOWNERS17
-rw-r--r--vendor/go.opentelemetry.io/otel/CONTRIBUTING.md664
-rw-r--r--vendor/go.opentelemetry.io/otel/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/Makefile307
-rw-r--r--vendor/go.opentelemetry.io/otel/README.md111
-rw-r--r--vendor/go.opentelemetry.io/otel/RELEASING.md135
-rw-r--r--vendor/go.opentelemetry.io/otel/VERSIONING.md224
-rw-r--r--vendor/go.opentelemetry.io/otel/attribute/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/attribute/doc.go5
-rw-r--r--vendor/go.opentelemetry.io/otel/attribute/encoder.go135
-rw-r--r--vendor/go.opentelemetry.io/otel/attribute/filter.go49
-rw-r--r--vendor/go.opentelemetry.io/otel/attribute/iterator.go150
-rw-r--r--vendor/go.opentelemetry.io/otel/attribute/key.go123
-rw-r--r--vendor/go.opentelemetry.io/otel/attribute/kv.go75
-rw-r--r--vendor/go.opentelemetry.io/otel/attribute/set.go411
-rw-r--r--vendor/go.opentelemetry.io/otel/attribute/type_string.go31
-rw-r--r--vendor/go.opentelemetry.io/otel/attribute/value.go271
-rw-r--r--vendor/go.opentelemetry.io/otel/baggage/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/baggage/baggage.go1018
-rw-r--r--vendor/go.opentelemetry.io/otel/baggage/context.go28
-rw-r--r--vendor/go.opentelemetry.io/otel/baggage/doc.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/codes/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/codes/codes.go106
-rw-r--r--vendor/go.opentelemetry.io/otel/codes/doc.go10
-rw-r--r--vendor/go.opentelemetry.io/otel/doc.go25
-rw-r--r--vendor/go.opentelemetry.io/otel/error_handler.go27
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go43
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go10
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go105
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go147
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go20
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go17
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go219
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go300
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go65
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go20
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go215
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go24
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go142
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go351
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go40
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go26
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go56
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go145
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go210
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go392
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go63
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go20
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go215
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go24
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go142
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go351
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go40
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go26
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go56
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go145
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go155
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/prometheus/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go159
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/prometheus/doc.go7
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go554
-rw-r--r--vendor/go.opentelemetry.io/otel/get_main_pkgs.sh30
-rw-r--r--vendor/go.opentelemetry.io/otel/handler.go33
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go96
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go32
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/baggage/context.go81
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/gen.go18
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/global/handler.go36
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/global/instruments.go412
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go62
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/global/meter.go598
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/global/propagator.go71
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/global/state.go199
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/global/trace.go220
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/rawhelpers.go48
-rw-r--r--vendor/go.opentelemetry.io/otel/internal_logging.go15
-rw-r--r--vendor/go.opentelemetry.io/otel/metric.go42
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go260
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/asyncint64.go258
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/config.go81
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/doc.go177
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/embedded/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go243
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/instrument.go368
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/meter.go278
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/noop/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/noop/noop.go281
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/syncfloat64.go226
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/syncint64.go226
-rw-r--r--vendor/go.opentelemetry.io/otel/propagation.go20
-rw-r--r--vendor/go.opentelemetry.io/otel/propagation/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/propagation/baggage.go47
-rw-r--r--vendor/go.opentelemetry.io/otel/propagation/doc.go13
-rw-r--r--vendor/go.opentelemetry.io/otel/propagation/propagation.go142
-rw-r--r--vendor/go.opentelemetry.io/otel/propagation/trace_context.go156
-rw-r--r--vendor/go.opentelemetry.io/otel/renovate.json26
-rw-r--r--vendor/go.opentelemetry.io/otel/requirements.txt1
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go13
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go19
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go166
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md46
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go66
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go189
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/cache.go83
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/config.go172
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/doc.go47
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/env.go39
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go77
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go6
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go29
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go34
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go217
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go70
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go40
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go95
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go59
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go77
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go364
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go30
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go153
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go7
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go27
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go43
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go443
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go50
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go232
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go161
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go42
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go237
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go13
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md131
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go81
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go202
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/meter.go736
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go296
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go30
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go25
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go369
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go659
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/provider.go145
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/reader.go189
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/version.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/metric/view.go118
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/auto.go92
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go116
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/config.go195
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/container.go89
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/doc.go20
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/env.go95
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go109
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go12
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go8
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go18
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go11
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go17
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go19
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go36
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/os.go89
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go91
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go143
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go79
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go15
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go89
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/process.go173
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/resource.go294
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go414
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/doc.go10
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/event.go26
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go64
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go81
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/link.go23
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/provider.go494
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go96
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go282
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go121
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go133
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/span.go937
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go36
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go114
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go61
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go153
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/version.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/version.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/internal/http.go327
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/internal/v4/http.go394
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/internal/v4/net.go313
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.10.0/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.10.0/doc.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.10.0/exception.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go103
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go970
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.10.0/schema.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go1689
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.12.0/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go103
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go1031
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go1693
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go1198
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go188
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go10
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/http.go143
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go2060
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go2599
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go4387
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go200
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go1071
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go2545
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go1323
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go8996
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go1307
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.7.0/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.7.0/doc.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.7.0/exception.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go103
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.7.0/resource.go935
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.7.0/trace.go1547
-rw-r--r--vendor/go.opentelemetry.io/otel/trace.go36
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/config.go323
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/context.go50
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/doc.go119
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/embedded/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go45
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/nonrecording.go16
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/noop.go85
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/noop/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/noop/noop.go112
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/provider.go59
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/span.go177
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/trace.go323
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/tracer.go37
-rw-r--r--vendor/go.opentelemetry.io/otel/trace/tracestate.go330
-rw-r--r--vendor/go.opentelemetry.io/otel/verify_readmes.sh21
-rw-r--r--vendor/go.opentelemetry.io/otel/verify_released_changelog.sh42
-rw-r--r--vendor/go.opentelemetry.io/otel/version.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/versions.yaml42
282 files changed, 0 insertions, 67272 deletions
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
deleted file mode 100644
index 6bf3abc41..000000000
--- a/vendor/go.opentelemetry.io/otel/.codespellignore
+++ /dev/null
@@ -1,9 +0,0 @@
-ot
-fo
-te
-collison
-consequentially
-ans
-nam
-valu
-thirdparty
diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc
deleted file mode 100644
index e2cb3ea94..000000000
--- a/vendor/go.opentelemetry.io/otel/.codespellrc
+++ /dev/null
@@ -1,10 +0,0 @@
-# https://github.com/codespell-project/codespell
-[codespell]
-builtin = clear,rare,informal
-check-filenames =
-check-hidden =
-ignore-words = .codespellignore
-interactive = 1
-skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools
-uri-ignore-words-list = *
-write =
diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes
deleted file mode 100644
index 314766e91..000000000
--- a/vendor/go.opentelemetry.io/otel/.gitattributes
+++ /dev/null
@@ -1,3 +0,0 @@
-* text=auto eol=lf
-*.{cmd,[cC][mM][dD]} text eol=crlf
-*.{bat,[bB][aA][tT]} text eol=crlf
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
deleted file mode 100644
index ae8577ef3..000000000
--- a/vendor/go.opentelemetry.io/otel/.gitignore
+++ /dev/null
@@ -1,14 +0,0 @@
-.DS_Store
-Thumbs.db
-
-.tools/
-venv/
-.idea/
-.vscode/
-*.iml
-*.so
-coverage.*
-go.work
-go.work.sum
-
-gen/
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
deleted file mode 100644
index ce3f40b60..000000000
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ /dev/null
@@ -1,325 +0,0 @@
-# See https://github.com/golangci/golangci-lint#config-file
-run:
- issues-exit-code: 1 #Default
- tests: true #Default
-
-linters:
- # Disable everything by default so upgrades to not include new "default
- # enabled" linters.
- disable-all: true
- # Specifically enable linters we want to use.
- enable:
- - asasalint
- - bodyclose
- - depguard
- - errcheck
- - errorlint
- - godot
- - gofumpt
- - goimports
- - gosec
- - gosimple
- - govet
- - ineffassign
- - misspell
- - perfsprint
- - revive
- - staticcheck
- - tenv
- - testifylint
- - typecheck
- - unconvert
- - unused
- - unparam
- - usestdlibvars
-
-issues:
- # Maximum issues count per one linter.
- # Set to 0 to disable.
- # Default: 50
- # Setting to unlimited so the linter only is run once to debug all issues.
- max-issues-per-linter: 0
- # Maximum count of issues with the same text.
- # Set to 0 to disable.
- # Default: 3
- # Setting to unlimited so the linter only is run once to debug all issues.
- max-same-issues: 0
- # Excluding configuration per-path, per-linter, per-text and per-source.
- exclude-rules:
- # TODO: Having appropriate comments for exported objects helps development,
- # even for objects in internal packages. Appropriate comments for all
- # exported objects should be added and this exclusion removed.
- - path: '.*internal/.*'
- text: "exported (method|function|type|const) (.+) should have comment or be unexported"
- linters:
- - revive
- # Yes, they are, but it's okay in a test.
- - path: _test\.go
- text: "exported func.*returns unexported type.*which can be annoying to use"
- linters:
- - revive
- # Example test functions should be treated like main.
- - path: example.*_test\.go
- text: "calls to (.+) only in main[(][)] or init[(][)] functions"
- linters:
- - revive
- # It's okay to not run gosec and perfsprint in a test.
- - path: _test\.go
- linters:
- - gosec
- - perfsprint
- # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
- # as we commonly use it in tests and examples.
- - text: "G404:"
- linters:
- - gosec
- # Ignoring gosec G402: TLS MinVersion too low
- # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
- - text: "G402: TLS MinVersion too low."
- linters:
- - gosec
- include:
- # revive exported should have comment or be unexported.
- - EXC0012
- # revive package comment should be of the form ...
- - EXC0013
-
-linters-settings:
- depguard:
- rules:
- non-tests:
- files:
- - "!$test"
- - "!**/*test/*.go"
- - "!**/internal/matchers/*.go"
- deny:
- - pkg: "testing"
- - pkg: "github.com/stretchr/testify"
- - pkg: "crypto/md5"
- - pkg: "crypto/sha1"
- - pkg: "crypto/**/pkix"
- auto/sdk:
- files:
- - "!internal/global/trace.go"
- - "~internal/global/trace_test.go"
- deny:
- - pkg: "go.opentelemetry.io/auto/sdk"
- desc: Do not use SDK from automatic instrumentation.
- otlp-internal:
- files:
- - "!**/exporters/otlp/internal/**/*.go"
- deny:
- - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
- desc: Do not use cross-module internal packages.
- otlptrace-internal:
- files:
- - "!**/exporters/otlp/otlptrace/*.go"
- - "!**/exporters/otlp/otlptrace/internal/**.go"
- deny:
- - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
- desc: Do not use cross-module internal packages.
- otlpmetric-internal:
- files:
- - "!**/exporters/otlp/otlpmetric/internal/*.go"
- - "!**/exporters/otlp/otlpmetric/internal/**/*.go"
- deny:
- - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
- desc: Do not use cross-module internal packages.
- otel-internal:
- files:
- - "**/sdk/*.go"
- - "**/sdk/**/*.go"
- - "**/exporters/*.go"
- - "**/exporters/**/*.go"
- - "**/schema/*.go"
- - "**/schema/**/*.go"
- - "**/metric/*.go"
- - "**/metric/**/*.go"
- - "**/bridge/*.go"
- - "**/bridge/**/*.go"
- - "**/trace/*.go"
- - "**/trace/**/*.go"
- - "**/log/*.go"
- - "**/log/**/*.go"
- deny:
- - pkg: "go.opentelemetry.io/otel/internal$"
- desc: Do not use cross-module internal packages.
- - pkg: "go.opentelemetry.io/otel/internal/attribute"
- desc: Do not use cross-module internal packages.
- - pkg: "go.opentelemetry.io/otel/internal/internaltest"
- desc: Do not use cross-module internal packages.
- - pkg: "go.opentelemetry.io/otel/internal/matchers"
- desc: Do not use cross-module internal packages.
- godot:
- exclude:
- # Exclude links.
- - '^ *\[[^]]+\]:'
- # Exclude sentence fragments for lists.
- - '^[ ]*[-•]'
- # Exclude sentences prefixing a list.
- - ':$'
- goimports:
- local-prefixes: go.opentelemetry.io
- misspell:
- locale: US
- ignore-words:
- - cancelled
- perfsprint:
- err-error: true
- errorf: true
- int-conversion: true
- sprintf1: true
- strconcat: true
- revive:
- # Sets the default failure confidence.
- # This means that linting errors with less than 0.8 confidence will be ignored.
- # Default: 0.8
- confidence: 0.01
- rules:
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports
- - name: blank-imports
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr
- - name: bool-literal-in-expr
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr
- - name: constant-logical-expr
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
- # TODO (#3372) re-enable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280
- - name: context-as-argument
- disabled: true
- arguments:
- allowTypesBefore: "*testing.T"
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type
- - name: context-keys-type
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit
- - name: deep-exit
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer
- - name: defer
- disabled: false
- arguments:
- - ["call-chain", "loop"]
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports
- - name: dot-imports
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports
- - name: duplicated-imports
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return
- - name: early-return
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block
- - name: empty-block
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines
- - name: empty-lines
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming
- - name: error-naming
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return
- - name: error-return
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings
- - name: error-strings
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf
- - name: errorf
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported
- - name: exported
- disabled: false
- arguments:
- - "sayRepetitiveInsteadOfStutters"
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter
- - name: flag-parameter
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches
- - name: identical-branches
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
- - name: if-return
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement
- - name: increment-decrement
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow
- - name: indent-error-flow
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing
- - name: import-shadowing
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments
- - name: package-comments
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range
- - name: range
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure
- - name: range-val-in-closure
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address
- - name: range-val-address
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id
- - name: redefines-builtin-id
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format
- - name: string-format
- disabled: false
- arguments:
- - - panic
- - '/^[^\n]*$/'
- - must not contain line breaks
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag
- - name: struct-tag
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else
- - name: superfluous-else
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal
- - name: time-equal
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming
- - name: var-naming
- disabled: false
- arguments:
- - ["ID"] # AllowList
- - ["Otel", "Aws", "Gcp"] # DenyList
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration
- - name: var-declaration
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion
- - name: unconditional-recursion
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return
- - name: unexported-return
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error
- - name: unhandled-error
- disabled: false
- arguments:
- - "fmt.Fprint"
- - "fmt.Fprintf"
- - "fmt.Fprintln"
- - "fmt.Print"
- - "fmt.Printf"
- - "fmt.Println"
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt
- - name: unnecessary-stmt
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break
- - name: useless-break
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
- - name: waitgroup-by-value
- disabled: false
- testifylint:
- enable-all: true
- disable:
- - float-compare
- - go-require
- - require-error
diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore
deleted file mode 100644
index 40d62fa2e..000000000
--- a/vendor/go.opentelemetry.io/otel/.lycheeignore
+++ /dev/null
@@ -1,6 +0,0 @@
-http://localhost
-http://jaeger-collector
-https://github.com/open-telemetry/opentelemetry-go/milestone/
-https://github.com/open-telemetry/opentelemetry-go/projects
-file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
-file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
deleted file mode 100644
index 3202496c3..000000000
--- a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Default state for all rules
-default: true
-
-# ul-style
-MD004: false
-
-# hard-tabs
-MD010: false
-
-# line-length
-MD013: false
-
-# no-duplicate-header
-MD024:
- siblings_only: true
-
-#single-title
-MD025: false
-
-# ol-prefix
-MD029:
- style: ordered
-
-# no-inline-html
-MD033: false
-
-# fenced-code-language
-MD040: false
-
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
deleted file mode 100644
index 599d59cd1..000000000
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ /dev/null
@@ -1,3302 +0,0 @@
-# Changelog
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
-
-This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [Unreleased]
-
-<!-- Released section -->
-<!-- Don't change this section unless doing release -->
-
-## [1.34.0/0.56.0/0.10.0] 2025-01-17
-
-### Changed
-
-- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167)
-
-### Fixed
-
-- Relax minimum Go version to 1.22.0 in various modules. (#6073)
-- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143)
-- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlphttpgrpc` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143)
-
-## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12
-
-### Added
-
-- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994)
-- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`.
- This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`.
- Users can use it to avoid performing computationally expensive operations when recording measurements.
- It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016)
-
-### Changed
-
-- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package.
- See that package for more information. (#5920)
-- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929)
-- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929)
-- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929)
-- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011)
-- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009)
-
-### Fixed
-
-- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954)
-- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954)
-- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954)
-- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995)
-- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997)
-- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032)
-
-## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08
-
-### Added
-
-- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850)
-- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850)
-- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861)
-- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861)
-- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861)
-- The `go.opentelemetry.io/otel/semconv/v1.27.0` package.
- The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894)
-- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903)
-- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927)
-- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934)
-- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934)
-- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935)
-- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935)
-- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933)
-- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933)
-- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932)
-
-### Changed
-
-- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924)
-- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926)
-- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925)
-- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931)
-- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804)
-
-### Fixed
-
-- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881)
-- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892)
-- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911)
-- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915)
-- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912)
-- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944)
-- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944)
-- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944)
-- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944)
-- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900)
-
-### Removed
-
-- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930)
-
-## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
-
-### Added
-
-- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862)
-- Add `WithExportBufferSize` option to log batch processor. (#5877)
-
-### Changed
-
-- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778)
-- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791)
-- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791)
-- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847)
-- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864)
-- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858)
-- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874)
-
-### Deprecated
-
-- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854)
-
-### Fixed
-
-- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819)
-- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope. (#5803)
-- Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
-- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827)
-- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827)
-
-## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09
-
-### Added
-
-- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739)
-- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773)
-- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773)
-- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755)
-
-### Fixed
-
-- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754)
-- Fix panic on instruments creation when setting meter provider. (#5758)
-- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780)
-
-### Removed
-
-- Drop support for [Go 1.21]. (#5736, #5740, #5800)
-
-## [1.29.0/0.51.0/0.5.0] 2024-08-23
-
-This release is the last to support [Go 1.21].
-The next release will require at least [Go 1.22].
-
-### Added
-
-- Add MacOS ARM64 platform to the compatibility testing suite. (#5577)
-- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627)
-- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`.
- This new module contains an OTLP exporter that transmits log telemetry using gRPC.
- This module is unstable and breaking changes may be introduced.
- See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629)
-- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651)
-- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651)
-- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665)
-- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`.
- This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not.
- It replaces the existing `Enabled` method that is removed from the `Processor` interface itself.
- It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692)
-- Support [Go 1.23]. (#5720)
-
-### Changed
-
-- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132)
-- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636)
-- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665)
-- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666)
-- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666)
-- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method.
- See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692)
-- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
-- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
-
-### Fixed
-
-- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584)
-- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541)
-- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612)
-- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612)
-- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612)
-- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612)
-- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612)
-- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612)
-- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612)
-- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641)
-- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650)
-- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
-- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
-
-### Removed
-
-- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
-- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
-
-## [1.28.0/0.50.0/0.4.0] 2024-07-02
-
-### Added
-
-- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`.
- This method is used to check if an `Instrument` instance is a zero-value. (#5431)
-- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468)
-- The `go.opentelemetry.io/otel/semconv/v1.26.0` package.
- The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476)
-- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499)
-- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530)
-
-### Changed
-
-- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457)
-- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490)
-- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490)
-- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490)
- - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes.
-- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490)
-- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490)
-- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493)
-- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497)
-- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520)
-- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545)
-
-### Fixed
-
-- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376)
-- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
-- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
-- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434)
-- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435)
-- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456)
-- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464)
-- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494)
-- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508)
-- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514)
-- Fix stale timestamps reported by the last-value aggregation. (#5517)
-- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
-- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549)
-- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)
-
-## [1.27.0/0.49.0/0.3.0] 2024-05-21
-
-### Added
-
-- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242)
-- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258)
-- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263)
-- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276)
-- Add metrics in the `otel-collector` example. (#5283)
-- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304)
- - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`.
- - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument.
-- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349)
-
-### Changed
-
-- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189)
-- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189)
-- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230)
-- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230)
-- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241)
-- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272)
-- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286)
-- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305)
-- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315)
-- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374)
-
-### Fixed
-
-- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306)
-- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311)
-- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365)
-- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371)
-- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375)
-
-## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
-
-### Added
-
-- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134)
-- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194)
-- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`.
- This new module contains the Go implementation of the OpenTelemetry Logs SDK.
- This module is unstable and breaking changes may be introduced.
- See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
-- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
- This new module contains an OTLP exporter that transmits log telemetry using HTTP.
- This module is unstable and breaking changes may be introduced.
- See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
-- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
- This new module contains an exporter that prints log records to STDOUT.
- This module is unstable and breaking changes may be introduced.
- See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
-- The `go.opentelemetry.io/otel/semconv/v1.25.0` package.
- The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254)
-
-### Changed
-
-- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
-- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
-- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244)
-
-### Fixed
-
-- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159)
-
-## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
-
-### Added
-
-- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906)
-- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906)
-- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032)
-- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`.
- This method is used to notify users if a log record will be emitted or not. (#5071)
-- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`.
- This value represents an unset severity level. (#5072)
-- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076)
-- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
- This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes.
- At which point, users will be required to migrate their code, and this package will be deprecated then removed. (#5085)
-- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100)
-- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108)
-- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116)
-- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117)
-- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111)
-- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528)
-
-### Changed
-
-- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049)
-- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151)
- Because of that `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError).
- Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext) which was used before is now deprecated.
-
-### Fixed
-
-- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027)
-- Prevent default `ErrorHandler` self-delegation. (#5137)
-- Update all dependencies to address [GO-2024-2687]. (#5139)
-
-### Removed
-
-- Drop support for [Go 1.20]. (#4967)
-
-### Deprecated
-
-- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734)
-- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734)
-- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734)
-
-## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23
-
-This release is the last to support [Go 1.20].
-The next release will require at least [Go 1.21].
-
-### Added
-
-- Support [Go 1.22]. (#4890)
-- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900)
-- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900)
-- The `go.opentelemetry.io/otel/log` module is added.
- This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
- This module is in an alpha state, it is subject to breaking changes.
- See our [versioning policy](./VERSIONING.md) for more info. (#4961)
-- Add ARM64 platform to the compatibility testing suite. (#4994)
-
-### Fixed
-
-- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945)
-- Fix negative buckets in output of exponential histograms. (#4956)
-
-## [1.23.1] 2024-02-07
-
-### Fixed
-
-- Register all callbacks passed during observable instrument creation instead of just the last one multiple times in `go.opentelemetry.io/otel/sdk/metric`. (#4888)
-
-## [1.23.0] 2024-02-06
-
-This release contains the first stable, `v1`, release of the following modules:
-
-- `go.opentelemetry.io/otel/bridge/opencensus`
-- `go.opentelemetry.io/otel/bridge/opencensus/test`
-- `go.opentelemetry.io/otel/example/opencensus`
-- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
-- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
-- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
-
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-### Added
-
-- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808)
-- Experimental exemplar exporting is added to the metric SDK.
- See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871)
-- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
- This error is returned when a merge of two `Resource`s with different (non-empty) schema URL is attempted. (#4876)
-
-### Changed
-
-- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now returns a partial result if there is a schema URL merge conflict.
- Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned.
- It is up to the user to decide if they want to use the returned `Resource` or not.
- It may have desired attributes overwritten or include stale semantic conventions. (#4876)
-
-### Fixed
-
-- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449)
-- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
-- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/sdk/metric/metricdata`. (#4827)
-
-## [1.23.0-rc.1] 2024-01-18
-
-This is a release candidate for the v1.23.0 release.
-That release is expected to include the `v1` release of the following modules:
-
-- `go.opentelemetry.io/otel/bridge/opencensus`
-- `go.opentelemetry.io/otel/bridge/opencensus/test`
-- `go.opentelemetry.io/otel/example/opencensus`
-- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
-- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
-- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
-
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-## [1.22.0/0.45.0] 2024-01-17
-
-### Added
-
-- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
- The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
-- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
- The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
-- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
- The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
-- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
- The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
-- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
-- Experimental cardinality limiting is added to the metric SDK.
- See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
-- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
-
-### Changed
-
-- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
-- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754)
-- Record synchronous measurements when the passed context is canceled instead of dropping in `go.opentelemetry.io/otel/sdk/metric`.
- If you do not want to make a measurement when the context is cancelled, you need to handle it yourself (e.g `if ctx.Err() != nil`). (#4671)
-- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
-- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
-- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
-- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
-- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
-- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
-- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
-
-### Fixed
-
-- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755)
-- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
-- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
-- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
-- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that requires escaping during propagation. (#4804)
-- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
-
-## [1.21.0/0.44.0] 2023-11-16
-
-### Removed
-
-- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
-- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
-- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
-- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
-
-### Fixed
-
-- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
-- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
-
-## [1.20.0/0.43.0] 2023-11-10
-
-This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
-
-### Added
-
-- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
-- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
-- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
-- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
-- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
-- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
-- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603)
-- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
-- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
-- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622)
-- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
-- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
-- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
-
-### Deprecated
-
-- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
-- Deprecate `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
-- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
- Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
-- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
-- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
-
-### Changed
-
-- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
-- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
- This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
- Implementers need to update their implementations based on what they want the default behavior of the interface to be.
- See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
-- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
- This extends the `Tracer` interface and is a breaking change for any existing implementation.
- Implementers need to update their implementations based on what they want the default behavior of the interface to be.
- See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
-- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
- This extends the `Span` interface and is a breaking change for any existing implementation.
- Implementers need to update their implementations based on what they want the default behavior of the interface to be.
- See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
-- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
-- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
-- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
-- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
-- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
-- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
-- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
-- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
-
-### Fixed
-
-- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as a whitespace. (#4667)
-- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as a whitespace. (#4699)
-- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as a whitespace. (#4699)
-- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as a whitespace. (#4699)
-- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` as they were rendered as a whitespace. (#4699)
-- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` as they were rendered as a whitespace. (#4699)
-- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
-- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
-- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
-
-## [1.19.0/0.42.0/0.0.7] 2023-09-28
-
-This release contains the first stable release of the OpenTelemetry Go [metric SDK].
-Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-### Added
-
-- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
-- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
-
-### Changed
-
-- Allow '/' characters in metric instrument names. (#4501)
-- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
-- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
-
-### Fixed
-
-- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
-
-### Removed
-
-- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
-
-## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
-
-This is a release candidate for the v1.19.0/v0.42.0 release.
-That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-### Changed
-
-- Allow '/' characters in metric instrument names. (#4501)
-
-### Fixed
-
-- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
-
-## [1.18.0/0.41.0/0.0.6] 2023-09-12
-
-This release drops the compatibility guarantee of [Go 1.19].
-
-### Added
-
-- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the prometheus exporter's manual reader. (#4473)
-- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
-
-### Changed
-
-- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
-
-### Deprecated
-
-- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
- The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
-
-### Removed
-
-- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
-- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
-- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
-- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
-- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
-
-## [1.17.0/0.40.0/0.0.5] 2023-08-28
-
-### Added
-
-- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
-- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
-- Add support for exponential histogram aggregations.
- A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245)
-- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
-- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
-- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
-- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306)
-- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
-- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
- The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
-- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
-- Document the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter"` need to be concurrent safe. (#4381)
-- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus` (#4374)
-- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
-- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
-- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
-- Support Go 1.21. (#4463)
-
-### Changed
-
-- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
-- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
-- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
-- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
-- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
-- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
-- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
-- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
-- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
-- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
-- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
-- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333)
-- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377)
-- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
-- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
-- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
-
-### Removed
-
-- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`.
- Use the added `WithProducer` option instead. (#4346)
-- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`.
- Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
-
-### Fixed
-
-- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
-- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
-- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
-- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
-- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
-- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
-- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
-- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
-- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
-- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
-- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
-- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
-- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
-- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
-- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
-- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
-- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
-- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
-- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
- OpenTelemetry dropped support for Jaeger exporter in July 2023.
- Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
- or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423)
-- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
-- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
-- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
-- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
-- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
- Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
-
-## [1.16.0/0.39.0] 2023-05-18
-
-This release contains the first stable release of the OpenTelemetry Go [metric API].
-Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-### Added
-
-- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
- The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
-- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
- The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
-- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
-- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222)
-- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
-
-### Changed
-
-- Use `strings.Cut()` instead of `strings.SplitN()` for better readability and memory use. (#4049)
-- `MeterProvider` returns noop meters once it has been shutdown. (#4154)
-
-### Removed
-
-- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
- Use `go.opentelemetry.io/otel/metric` instead. (#4055)
-
-### Fixed
-
-- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
-
-## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
-
-This is a release candidate for the v1.16.0/v0.39.0 release.
-That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-### Added
-
-- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
- - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
- - Use `GetMeterProvider` for a global `metric.MeterProvider`.
- - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
-
-### Changed
-
-- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
- This stages the metric API to be released as a stable module. (#4038)
-
-### Removed
-
-- The `go.opentelemetry.io/otel/metric/global` package is removed.
- Use `go.opentelemetry.io/otel` instead. (#4039)
-
-## [1.15.1/0.38.1] 2023-05-02
-
-### Fixed
-
-- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
-
-## [1.15.0/0.38.0] 2023-04-27
-
-### Added
-
-- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916)
-- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949)
-- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970)
-- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971)
- - The `AddConfig` used to hold configuration for addition measurements
- - `NewAddConfig` used to create a new `AddConfig`
- - `AddOption` used to configure an `AddConfig`
- - The `RecordConfig` used to hold configuration for recorded measurements
- - `NewRecordConfig` used to create a new `RecordConfig`
- - `RecordOption` used to configure a `RecordConfig`
- - The `ObserveConfig` used to hold configuration for observed measurements
- - `NewObserveConfig` used to create a new `ObserveConfig`
- - `ObserveOption` used to configure an `ObserveConfig`
-- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`.
- They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971)
-- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956)
-- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
-
-### Changed
-
-- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870)
-- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`.
- This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916)
-- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941)
- - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider`
-- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966)
-- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic grpc errors. (#3974)
-- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
- - The `Int64Counter.Add` method now accepts `...AddOption`
- - The `Float64Counter.Add` method now accepts `...AddOption`
- - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
- - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
- - The `Int64Histogram.Record` method now accepts `...RecordOption`
- - The `Float64Histogram.Record` method now accepts `...RecordOption`
- - The `Int64Observer.Observe` method now accepts `...ObserveOption`
- - The `Float64Observer.Observe` method now accepts `...ObserveOption`
-- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
- - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
- - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
-- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
-
-### Fixed
-
-- `TracerProvider` allows calling `Tracer()` while it's shutting down.
- It used to deadlock. (#3924)
-- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
-- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
-- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
- Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
-
-## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
-
-This is a release candidate for the v1.15.0/v0.38.0 release.
-That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-### Added
-
-- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812)
-- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
-- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
- Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849)
-- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
-- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
-- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)
-
-### Changed
-
-- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
-- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
-- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833)
-- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
-- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
-- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
-- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
-- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
-- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
-- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900)
-
-### Fixed
-
-- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown. (#3845)
-
-### Removed
-
-- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
-- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
-- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
- Use the added `float64` instrument configuration instead. (#3895)
-- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
- Use the added `int64` instrument configuration instead. (#3895)
-- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893)
-
-## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
-
-This is a release candidate for the v1.15.0/v0.38.0 release.
-That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-This release drops the compatibility guarantee of [Go 1.18].
-
-### Added
-
-- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
- - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
- - Use `GetMeterProvider` for a global `metric.MeterProvider`.
- - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
-
-### Changed
-
-- Dropped compatibility testing for [Go 1.18].
- The project no longer guarantees support for this version of Go. (#3813)
-
-### Fixed
-
-- Handle empty environment variables as if they were not set. (#3764)
-- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
-- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
-- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
- Use `go.opentelemetry.io/otel` instead. (#3818)
-
-### Removed
-
-- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
-
-## [1.14.0/0.37.0/0.0.4] 2023-02-27
-
-This release is the last to support [Go 1.18].
-The next release will require at least [Go 1.19].
-
-### Added
-
-- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
-- Support [Go 1.20]. (#3693)
-- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
- The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
- - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
- - `OtelScopeNameKey` -> `OTelScopeNameKey`
- - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
- - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
- - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
- - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
- - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
- - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
- - `OtelStatusCodeError` -> `OTelStatusCodeError`
- - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
- - `OtelScopeName` -> `OTelScopeName`
- - `OtelScopeVersion` -> `OTelScopeVersion`
- - `OtelLibraryName` -> `OTelLibraryName`
- - `OtelLibraryVersion` -> `OTelLibraryVersion`
- - `OtelStatusDescription` -> `OTelStatusDescription`
-- A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
- See the [README](./bridge/opentracing/README.md) for more information. (#3570)
-- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
-- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
-- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
- - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
- - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted.
-
-### Changed
-
-- Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
-- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
- This change is made to enable memory reuse by SDK users. (#3732)
-- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
-
-### Fixed
-
-- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
-- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
-- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
-- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
-- Data race issue in OTLP exporter retry mechanism. (#3755, #3756)
-- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
-- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
- Use the equivalent unit string instead. (#3776)
- - Use `"1"` instead of `unit.Dimensionless`
- - Use `"By"` instead of `unit.Bytes`
- - Use `"ms"` instead of `unit.Milliseconds`
-
-## [1.13.0/0.36.0] 2023-02-07
-
-### Added
-
-- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
- These functions ensure semantic convention type correctness. (#3675)
-
-### Fixed
-
-- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687)
- - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
- - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
- - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
- - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
- - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
-
-### Removed
-
-- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
-- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
-- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
-- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
-
-## [1.12.0/0.35.0] 2023-01-28
-
-### Added
-
-- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
- This option is used to configure `int64` Observer callbacks during their creation. (#3507)
-- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
- This option is used to configure `float64` Observer callbacks during their creation. (#3507)
-- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
- These additions are used to enable external metric Producers. (#3524)
-- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
- This new named function type is registered with a `Meter`. (#3564)
-- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
- The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
- - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
- - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
-- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
- The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
-- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
- The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
-- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
- The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
-- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
- These instruments are used as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
- - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
- - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
- - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
- - `Int64ObservableCounter` replaces the `asyncint64.Counter`
- - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
- - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
- - `Float64Counter` replaces the `syncfloat64.Counter`
- - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
- - `Float64Histogram` replaces the `syncfloat64.Histogram`
- - `Int64Counter` replaces the `syncint64.Counter`
- - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
- - `Int64Histogram` replaces the `syncint64.Histogram`
-- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
- This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
-- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
- This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
-- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
- The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
-
-### Changed
-
-- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
-- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
- - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
- - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
- - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
- - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
-- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
- This `Registration` can be used to unregister callbacks. (#3522)
-- Global error handler uses an atomic value instead of a mutex. (#3543)
-- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
-- Global logger uses an atomic value instead of a mutex. (#3545)
-- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
-- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
- This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557)
-- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
- Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
-- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
-- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
- - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
- - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
- - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
- - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
- - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
- - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
-- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed.
- - The named `Callback` replaces the inline function parameter. (#3564)
- - `Callback` is required to return an error. (#3576)
- - `Callback` accepts the added `Observer` parameter.
- This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
- - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
-- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
- This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
- Instead it uses the `net.sock.peer` attributes. (#3581)
-- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
-
-### Fixed
-
-- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
-- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
- Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
-
-### Deprecated
-
-- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
- Use `NewMetricProducer` instead. (#3541)
-- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
- Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
-- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
- Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
-- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
- Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
-- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
- Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
-- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
- Use `NewTracerProvider` instead. (#3116)
-
-### Removed
-
-- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
-- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
- Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
- - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
- - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
- - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
-- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
- Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
- - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
- - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
- - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
-- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
- Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
- - The `Counter` method is replaced by `Meter.Int64Counter`
- - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
- - The `Histogram` method is replaced by `Meter.Int64Histogram`
-- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
- Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
- - The `Counter` method is replaced by `Meter.Float64Counter`
- - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
- - The `Histogram` method is replaced by `Meter.Float64Histogram`
-
-## [1.11.2/0.34.0] 2022-12-05
-
-### Added
-
-- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
- This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387)
-- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter.
- This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
-- OTLP exporters now recognize: (#3363)
- - `OTEL_EXPORTER_OTLP_INSECURE`
- - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
- - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
- - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
- - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
- - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
- - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
- - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
- - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
-- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
- These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
-- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`.
- These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
-- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
-- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487)
-
-### Changed
-
-- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
- Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
- The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
-- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
-- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
-- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
-- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
-
-### Fixed
-
-- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
-- Remove comparable requirement for `Reader`s. (#3387)
-- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
-- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
-- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
-- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
-- Re-enabled Attribute Filters in the Metric SDK. (#3396)
-- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
-- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
-- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
-- Prevent duplicate Prometheus description, unit, and type. (#3469)
-- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
-
-### Removed
-
-- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
- Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
-
-## [1.11.1/0.33.0] 2022-10-19
-
-### Added
-
-- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
- By default, it will register with the default Prometheus registerer.
- A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
-- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
-- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
-
-### Changed
-
-- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
- It will return an error if the exporter fails to register with Prometheus. (#3239)
-
-### Fixed
-
-- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
-- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
- This fixes the implementation to be compliant with the W3C specification. (#3226)
-- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
-- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` no longer return an error when no processor is registered. (#3268)
-- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
-- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
-- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
-- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
-- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
- Instead the exporter is defined as an "unchecked" collector for Prometheus.
- This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342)
-- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
-- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
- This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
-
-## [1.11.0/0.32.3] 2022-10-12
-
-### Added
-
-- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)
-
-### Changed
-
-- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
-- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
- This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
-
-## [0.32.2] Metric SDK (Alpha) - 2022-10-11
-
-### Added
-
-- Added an example of using metric views to customize instruments. (#3177)
-- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
-
-### Changed
-
-- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220)
-- Update histogram default bounds to match the requirements of the latest specification. (#3222)
-- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
-
-### Fixed
-
-- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
-- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
-- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
-- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
-- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
-
-## [0.32.1] Metric SDK (Alpha) - 2022-09-22
-
-### Changed
-
-- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
- Invalid characters are replaced with `_`. (#3212)
-
-### Added
-
-- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
-- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
-
-### Fixed
-
-- Updated go.mods to point to valid versions of the sdk. (#3216)
-- Set the `MeterProvider` resource on all exported metric data. (#3218)
-
-## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
-
-### Changed
-
-- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
- Please see the package documentation for how the new SDK is initialized and configured. (#3175)
-- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179)
-
-### Removed
-
-- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
- A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
- A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
-- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
-- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
-- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175)
-- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175)
-- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
-
-## [1.10.0] - 2022-09-09
-
-### Added
-
-- Support Go 1.19. (#3077)
- Include compatibility testing and document support. (#3077)
-- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
-- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107)
-
-### Changed
-
-- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
-- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
-- All exporters will be shutdown even if one reports an error (#3091)
-- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
-
-## [1.9.0/0.0.3] - 2022-08-01
-
-### Added
-
-- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
-- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
- The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
-- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
- The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
-- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
-
-### Fixed
-
-- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
-
-## [1.8.0/0.31.0] - 2022-07-08
-
-### Added
-
-- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods
-of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
-
-### Changed
-
-- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
-- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope` (#2976)
-- Move metric no-op implementation from `nonrecording` to `metric` package. (#2866)
-
-### Removed
-
-- Support for go1.16. Support is now only for go1.17 and go1.18 (#2917)
-
-### Deprecated
-
-- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
- Use the equivalent `Scope` struct instead. (#2977)
-- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
- Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
-
-## [1.7.0/0.30.0] - 2022-04-28
-
-### Added
-
-- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
- The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
-- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
- The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
-- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
- The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
-- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
-
-### Fixed
-
-- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
-- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786)
-
-### Changed
-
-- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
-- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
- The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
-- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
- Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790)
-
-### Deprecated
-
-- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
- Use the equivalent `Iterator.Attribute` method instead. (#2790)
-- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
- Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
-- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
- Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
-
-### Removed
-
-- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
-- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
-
-## [0.29.0] - 2022-04-11
-
-### Added
-
-- The metrics global package was added back into several test files. (#2764)
-- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
- This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
-
-### Removed
-
-- Removed the module `go.opentelemetry.io/otel/sdk/export/metric`.
-  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
-
-### Changed
-
-- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
-- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
- This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
-
-## [1.6.3] - 2022-04-07
-
-### Fixed
-
-- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
-
-## [1.6.2] - 2022-04-06
-
-### Changed
-
-- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
-- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
- This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
-
-## [1.6.1] - 2022-03-28
-
-### Fixed
-
-- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
- Instead of using `"https://opentelemetry.io/schemas/v<version>"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/<version>"`. (#2743, #2744)
-
-### Security
-
-- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
- This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
-
-## [1.6.0/0.28.0] - 2022-03-23
-
-### ⚠️ Notice ⚠️
-
-This update is a breaking change of the unstable Metrics API.
-Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified.
-
-### Added
-
-- Add metrics exponential histogram support.
- New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
-- Add Go 1.18 to our compatibility tests. (#2679)
-- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
-- Add the `metric/global` for obtaining and setting the global `MeterProvider`. (#2660)
-
-### Changed
-
-- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
- High-level changes include:
-
- - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
- These `InstrumentProvider`s are managed with a `Meter`.
- - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
- - Asynchronous callbacks can now be registered with a `Meter`.
-
- Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
-
-### Fixed
-
-- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677)
-
-## [1.5.0] - 2022-03-16
-
-### Added
-
-- Log the Exporters configuration in the TracerProviders message. (#2578)
-- Added support to configure the span limits with environment variables.
- The following environment variables are supported. (#2606, #2637)
- - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
- - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
- - `OTEL_SPAN_EVENT_COUNT_LIMIT`
- - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
- - `OTEL_SPAN_LINK_COUNT_LIMIT`
- - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
-
- If the provided environment variables are invalid (negative), the default values would be used.
-- Rename the `gc` runtime name to `go` (#2560)
-- Add resource container ID detection. (#2418)
-- Add span attribute value length limit.
- The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
- The default limit for this resource is "unlimited". (#2637)
-- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
- This option replaces the `WithSpanLimits` option.
- Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
- Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited.
- Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
-
-### Changed
-
-- Drop oldest tracestate `Member` when capacity is reached. (#2592)
-- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
-- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
-- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
-- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
-- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
-
-### Fixed
-
-- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
-- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
-- Unlimited span limits are now supported (negative values). (#2636, #2637)
-
-### Deprecated
-
-- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
- Use `WithRawSpanLimits` instead.
- That option allows setting unlimited and zero limits, this option does not.
- This option will be kept until the next major version incremented release. (#2637)
-
-## [1.4.1] - 2022-02-16
-
-### Fixed
-
-- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
-
-## [1.4.0] - 2022-02-11
-
-### Added
-
-- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490)
-- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging.
- To enable use a logger with Verbosity (V level) `>=1`. (#2500)
-- Added support to configure the batch span-processor with environment variables.
- The following environment variables are used. (#2515)
- - `OTEL_BSP_SCHEDULE_DELAY`
- - `OTEL_BSP_EXPORT_TIMEOUT`
- - `OTEL_BSP_MAX_QUEUE_SIZE`.
- - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
-
-### Changed
-
-- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
-
-### Deprecated
-
-- Deprecate the module `go.opentelemetry.io/otel/sdk/export/metric`.
-  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
-- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
-
-### Fixed
-
-- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
-- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
-- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
-- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
-- W3C baggage will now decode urlescaped values. (#2529)
-- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522)
-- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
- Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
- This drop order still only applies to attributes with unique keys not already contained in the span.
- If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
-
-### Removed
-
-- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
- - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
- - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
- - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
-
-## [1.3.0] - 2021-12-10
-
-### ⚠️ Notice ⚠️
-
-We have updated the project minimum supported Go version to 1.16
-
-### Added
-
-- Added an internal Logger.
- This can be used by the SDK and API to provide users with feedback of the internal state.
- To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. (#2343)
-- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
-- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
-
-### Changed
-
-- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
-- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
-- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
-- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
-- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
-
-### Fixed
-
-- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
- Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
- When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
-- Basic metric controller updated to use sync.Map to avoid blocking calls (#2381)
-- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
-
-### Deprecated
-
-- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
-- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
-
-### Removed
-
-- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
-- Remove the metric Bound Instruments interface and implementations. (#2399)
-- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
-- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
-
-## [1.2.0] - 2021-11-12
-
-### Changed
-
-- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
-- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
-- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
- - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`
- - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
- - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
-- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
-
-### Added
-
-- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
-- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
-- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
-
-## [1.1.0] - 2021-10-27
-
-### Added
-
-- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
-- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
- The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
-- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
- The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
-- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
- The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
- - When upgrading from the `semconv/v1.4.0` package note the following name changes:
- - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
- - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
- - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
- - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
- - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
- - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
-
-### Changed
-
-- Links added to a span will be dropped by the SDK if they contain an invalid span context (#2275).
-
-### Fixed
-
-- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
-- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
-- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
-
-## [1.0.1] - 2021-10-01
-
-### Fixed
-
-- json stdout exporter no longer crashes due to concurrency bug. (#2265)
-
-## [Metrics 0.24.0] - 2021-10-01
-
-### Changed
-
-- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237)
-- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
- - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
- - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
-
-## [1.0.0] - 2021-09-20
-
-This is the first stable release for the project.
-This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md).
-
-### Added
-
-- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242)
-
-### Fixed
-
-- Slice-valued attributes can correctly be used as map keys. (#2223)
-
-### Removed
-
-- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
-- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
-- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
-- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package.
- Use the typed functions and methods added to the package instead. (#2235)
- - The `Key.Array` method is removed.
- - The `Array` function is removed.
- - The `Any` function is removed.
- - The `ArrayValue` function is removed.
- - The `AsArray` function is removed.
-
-## [1.0.0-RC3] - 2021-09-02
-
-### Added
-
-- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
-- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
-- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
- - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
-- Added the `go.opentelemetry.io/otel/example/fib` example package.
- Included is an example application that computes Fibonacci numbers. (#2203)
-
-### Changed
-
-- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
- - ValueRecorder becomes Histogram
- - ValueObserver becomes Gauge
- - SumObserver becomes CounterObserver
- - UpDownSumObserver becomes UpDownCounterObserver
- The API exported from this project is still considered experimental. (#2202)
-- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091)
-- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
-- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
-- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
- All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
- The functions from that package should be used instead. (#2166)
-- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated.
- Use the typed `*Slice` functions and types added to the package instead. (#2162)
-- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
- Use the typed functions instead. (#2181)
-- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
- The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
-
-### Removed
-
-- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
-
-### Fixed
-
-- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
-- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
-- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
-- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
-- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
-- Fixed typos in resources.go. (#2201)
-
-## [1.0.0-RC2] - 2021-07-26
-
-### Added
-
-- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
-- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
-- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
- This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
-- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. (#2115)
-- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
- This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
- For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
-- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
-  This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
-
-### Changed
-
-- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
-- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
-
-### Deprecated
-
-- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
-- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123)
-- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
- Use the `trace.ParseTraceState` function instead. (#2122)
-
-### Removed
-
-- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
-- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
-- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
- The explicit `With*` options for every built-in detector should be used instead. (#2026 #2097)
-- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
- The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
-- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)
-
-### Fixed
-
-- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032)
-- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
-- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
-- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
- This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102)
-- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
-- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)
-
-## [Experimental Metrics v0.22.0] - 2021-07-19
-
-### Added
-
-- Adds HTTP support for OTLP metrics exporter. (#2022)
-
-### Removed
-
-- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
-
-## [1.0.0-RC1] / 0.21.0 - 2021-06-18
-
-With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
-while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
-with major version 0.
-
-### Added
-
-- Adds `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
- - The following status codes are defined as transient errors:
- | gRPC Status Code | Description |
- | ---------------- | ----------- |
- | 1 | Cancelled |
- | 4 | Deadline Exceeded |
- | 8 | Resource Exhausted |
- | 10 | Aborted |
- | 11 | Out of Range |
- | 14 | Unavailable |
- | 15 | Data Loss |
-- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
-- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
- This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
-- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
-- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
-- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
-- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
- It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
-- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
- This method returns the number of list-members the `TraceState` holds. (#1937)
-- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data.
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1922)
-- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
-- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
- These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
-- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
-- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963)
-- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
-- Several builtin resource detectors now correctly populate the schema URL. (#1938)
-- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data.
-- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
-- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
-- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
-- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
-
-### Changed
-
-- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
- `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
-- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
-- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
-- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
-- BatchSpanProcessor now reports export failures when calling `ForceFlush()` method. (#1860)
-- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
-- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
-- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
- This method returns the status of a span using the new `Status` type. (#1874)
-- Updated `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
- This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
-- Unembed `SpanContext` in `Link`. (#1877)
-- Generate Semantic conventions from the specification YAML. (#1891)
-- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
-- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
-- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
-- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
-- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
-- Changed `span.End()` now only accepts Options that are allowed at `End()`. (#1921)
-- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
-- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
-- Refactored option types according to the contribution style guide. (#1882)
-- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
- This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
- The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
-- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
-- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
-- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
-- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
-- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
-- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
-- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
-- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
-- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
-- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
-- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
-- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
-- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
-- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
-- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
-
-### Removed
-
-- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
-- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`, Use the corresponding `With*()` to use individually. (#1810)
-- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`.
- The `Tracer` method is not required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
- The `IsRecording` method returns if the span is recording or not.
- A read-only span value does not need to know if updates to it will be recorded or not.
- By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
-- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
- The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
- When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
-- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
- Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own.
- The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library specific `Tracer` instead. (#1900)
- - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
-- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
-- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
-- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
- Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
-- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
- These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
-- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
-- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
-
-### Fixed
-
-- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
-- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
-- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
-- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
-- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
-- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973)
-- Avoid transport security when OTLP endpoint is a Unix socket. (#2001)
-
-### Security
-
-## [0.20.0] - 2021-04-23
-
-### Added
-
-- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, setup and install the exporter in tracing and metrics pipelines. (#1373)
-- Adds semantic conventions for exceptions. (#1492)
-- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`
- These environment variables can be used to override Jaeger agent hostname and port (#1752)
-- Option `ExportTimeout` was added to batch span processor. (#1755)
-- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
-- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
-- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
-- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
-- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
-- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788)
-- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
- - `process.pid`
- - `process.executable.name`
- - `process.executable.path`
- - `process.command_args`
- - `process.owner`
- - `process.runtime.name`
- - `process.runtime.version`
- - `process.runtime.description`
-- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
-- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811)
- - `OTEL_EXPORTER_OTLP_ENDPOINT`
- - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
- - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
- - `OTEL_EXPORTER_OTLP_HEADERS`
- - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
- - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
- - `OTEL_EXPORTER_OTLP_COMPRESSION`
- - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
- - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
- - `OTEL_EXPORTER_OTLP_TIMEOUT`
- - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
- - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
- - `OTEL_EXPORTER_OTLP_CERTIFICATE`
- - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
- - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
-- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821)
-- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
-
-### Fixed
-
-- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
-- The Jaeger exporter now correctly sets tags for the Span status code and message.
- This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
-- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag.
- Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
-- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
-- Fixed typo for default service name in Jaeger Exporter. (#1797)
-- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814)
-- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
- Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
-
-### Changed
-
-- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
-- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
-- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
-- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
-- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
- The Span's SpanContext can now self-identify as being remote or not.
- This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
-- Improve OTLP/gRPC exporter connection errors. (#1737)
-- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
- The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
-- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
- This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
-- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
- to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
-- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
-- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
- It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
-- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
-- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777)
-- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
-- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796)
-- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
-- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
- This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
-- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
-- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
- The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
-- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
-- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
-
-### Removed
-
-- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`
- These environment variables will no longer be used to override values of the Jaeger exporter (#1752)
-- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
- This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
- To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
-- Setting error status while recording error with Span from oteltest package. (#1729)
-- The concept of a remote and local Span stored in a context is unified to just the current Span.
- Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
- Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
- If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
-- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
- This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
-- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
-- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package.
- The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
-- Remove the `WithDisabled` option from the Jaeger exporter.
- To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
-- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
- These functions for retrieving specific environment variable values are redundant of other internal functions and
- are not intended for end user use. (#1824)
-- Removed the Jaeger exporter `WithSDKOptions` `Option`.
- This option was used to set SDK options for the exporter creation convenience functions.
- These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases.
- If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
-- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
- The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
-- The Jaeger exporter `Option` type is removed.
- The type is no longer used by the exporter to configure anything.
- All the previous configurations these options provided were duplicates of SDK configuration.
- They have been removed in favor of using the SDK configuration and focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830)
-
-## [0.19.0] - 2021-03-18
-
-### Added
-
-- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586)
-- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
-- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702)
-- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
-- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
-
-### Changed
-
-- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
- - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known.
-- Update the `ForceFlush` method signature to the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
-- Update the `Shutdown` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` return an error on shutdown failure. (#1608)
-- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
-- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656)
-- Added non-empty string check for trace `Attribute` keys. (#1659)
-- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
-- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
-- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
-- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
-- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
-- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
-
-### Removed
-
-- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549)
-- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
-- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
- These are now returned as a SpanProcessor interface from their respective constructors. (#1638)
-- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
-- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
-- Removed `jaeger.WithProcess` configuration option. (#1673)
-- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
-
-### Fixed
-
-- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626)
-- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
-- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
-- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
-- Synchronization issues in global trace delegate implementation. (#1686)
-- Reduced excess memory usage by global `TracerProvider`. (#1687)
-
-## [0.18.0] - 2021-03-03
-
-### Added
-
-- Added `resource.Default()` for use with meter and tracer providers. (#1507)
-- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
-- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544)
-- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558)
-- Compatibility testing suite in the CI system for the following systems. (#1567)
- | OS | Go Version | Architecture |
- | ------- | ---------- | ------------ |
- | Ubuntu | 1.15 | amd64 |
- | Ubuntu | 1.14 | amd64 |
- | Ubuntu | 1.15 | 386 |
- | Ubuntu | 1.14 | 386 |
- | MacOS | 1.15 | amd64 |
- | MacOS | 1.14 | amd64 |
- | Windows | 1.15 | amd64 |
- | Windows | 1.14 | amd64 |
- | Windows | 1.15 | 386 |
- | Windows | 1.14 | 386 |
-
-### Changed
-
-- Replaced interface `oteltest.SpanRecorder` with its existing implementation
- `StandardSpanRecorder`. (#1542)
-- Default span limit values to 128. (#1535)
-- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
-- Renamed the `otel/label` package to `otel/attribute`. (#1541)
-- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
-- Parallelize the CI linting and testing. (#1567)
-- Stagger timestamps in exact aggregator tests. (#1569)
-- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
-- Prevent end-users from implementing some interfaces (#1575)
-
- ```
- "otel/exporters/otlp/otlphttp".Option
- "otel/exporters/stdout".Option
- "otel/oteltest".Option
- "otel/trace".TracerOption
- "otel/trace".SpanOption
- "otel/trace".EventOption
- "otel/trace".LifeCycleOption
- "otel/trace".InstrumentationOption
- "otel/sdk/resource".Option
- "otel/sdk/trace".ParentBasedSamplerOption
- "otel/sdk/trace".ReadOnlySpan
- "otel/sdk/trace".ReadWriteSpan
- ```
-
-### Removed
-
-- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
-- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
-- Removed the `test-386` make target.
- This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
-
-### Fixed
-
-- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572)
-- Windows build of Jaeger tests now compiles with OS specific functions (#1576). (#1577)
-- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue is now set up explicitly to be sequential (#1578). (#1579)
-- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
-- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
-
-## [0.17.0] - 2021-02-12
-
-### Changed
-
-- Rename project default branch from `master` to `main`. (#1505)
-- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
-- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
-- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
-- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
-
-### Fixed
-
-- Fixed otlpgrpc reconnection issue.
-- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and used the new `WithAddress` instead of `WithEndpoint`. (#1513)
-- The otel-collector example now uses the default OTLP receiver port of the collector.
-
-## [0.16.0] - 2021-01-13
-
-### Added
-
-- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
-- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
-- Added documentation about the project's versioning policy. (#1388)
-- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
-- Added codeql workflow to GitHub Actions (#1428)
-- Added Gosec workflow to GitHub Actions (#1429)
-- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420)
-- Add an OpenCensus exporter bridge. (#1444)
-
-### Changed
-
-- Rename `internal/testing` to `internal/internaltest`. (#1449)
-- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
-- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
-- Improve span duration accuracy. (#1360)
-- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
-- Remove duplicate checkout from GitHub Actions workflow (#1407)
-- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412)
-- Metric `exact` aggregator includes per-point timestamps (#1412)
-- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
-- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
-- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
-- Unify endpoint API that related to OTel exporter. (#1401)
-- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435)
-- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
-- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
-- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432)
-- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
-- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
-- Metric Push and Pull Controller components are combined into a single "basic" Controller:
- - `WithExporter()` and `Start()` to configure Push behavior
- - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
- - `Start()` and `Stop()` accept Context. (#1378)
-- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
-
-### Removed
-
-- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
-- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412)
-- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412)
-
-### Fixed
-
-- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443)
-
-## [0.15.0] - 2020-12-10
-
-### Added
-
-- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)
-
-### Changed
-
-- The Zipkin exporter now uses the Span status code to determine the status of the exported span. (#1328)
-- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
-- Move the OpenCensus example into `example` directory. (#1359)
-- Moved the SDK's `internal.IDGenerator` interface in to the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
-- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374)
-- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375)
-
-### Fixed
-
-- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
-
-## [0.14.0] - 2020-11-19
-
-### Added
-
-- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
-- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
-- `SpanContextFromContext` returns `SpanContext` from context. (#1255)
-- `TraceState` has been added to `SpanContext`. (#1340)
-- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323)
-- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
-- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
-- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
-
-### Changed
-
-- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
- - `ID` has been renamed to `TraceID`.
- - `IDFromHex` has been renamed to `TraceIDFromHex`.
- - `EmptySpanContext` is removed.
-- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
-- OTLP Exporter updates:
- - supports OTLP v0.6.0 (#1230, #1354)
- - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
-- The Sampler is now called on local child spans. (#1233)
-- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
-- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`.
- This matches the returned type and fixes misuse of the term metric. (#1240)
-- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
-- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/otel/oteltest` as part of #964. (#1252)
-- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
-- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
-- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
-- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254)
-- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254)
-- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
-- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
-- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
-- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
-- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and
- `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
-- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
-- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
-- Updated span collection limits for attribute, event and link counts to 1000 (#1318)
-- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
-
-### Removed
-
-- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` from the `go.opentelemetry.io/otel` package are unexported now. (#1243)
-- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
- It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254)
-- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
- `Tracer` and `Span` from the same module should be used in their place instead. (#1306)
-- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
-- Remove the following labels types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
-
-### Fixed
-
-- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
-- The `go.opentelemetry.io/otel/api/global` package's global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
-- Fix condition in `label.Any`. (#1299)
-- Fix global `TracerProvider` to pass options to its configured provider. (#1329)
-- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309)
-
-## [0.13.0] - 2020-10-08
-
-### Added
-
-- OTLP Metric exporter supports Histogram aggregation. (#1209)
-- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
-- A Baggage API to implement the OpenTelemetry specification. (#1217)
-- Add Shutdown method to sdk/trace/provider, shutdown processors in the order they were registered. (#1227)
-
-### Changed
-
-- Set default propagator to no-op propagator. (#1184)
-- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325)
-- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
-- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
- They now are `Unset`, `Error`, and `Ok`.
- They no longer track the gRPC codes. (#1214)
-- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
-- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325)
-- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264)
-
-### Fixed
-
-- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
-
-### Removed
-
-- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
-- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
- The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
-- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
-- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
-- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
-- Nested array/slice support has been removed. (#1226)
-
-## [0.12.0] - 2020-09-24
-
-### Added
-
-- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
-- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
- This addition was made to conform with our project option conventions. (#1155)
-- Instrumentation library information was added to the Zipkin exporter. (#1119)
-- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
-- More semantic conventions for k8s as resource attributes. (#1167)
-
-### Changed
-
-- Add reconnecting udp connection type to Jaeger exporter.
- This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record.
- It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
-- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
- This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
-- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
-  This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
-- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
-- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
- This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
-- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
-- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
-- Move `tools` package under `internal`. (#1141)
-- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
- The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
-- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153)
-- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
-- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
-- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
- recommend the use of `newConfig()` instead of `configure()`. (#1163)
-- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
-- Ensure exported interface types include parameter names and update the
- Style Guide to reflect this styling rule. (#1172)
-- Don't consider unset environment variable for resource detection to be an error. (#1170)
-- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
- `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
-- ValueObserver instruments use LastValue aggregator by default. (#1165)
-- OTLP Metric exporter supports LastValue aggregation. (#1165)
-- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
-- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
-- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
-- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
-- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
-- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
-- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
-- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
-- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
-- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
-- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
-- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
-- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
-- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
-- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
-- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
-- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
-- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
-
-### Removed
-
-- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
- `go.opentelemetry.io/contrib/propagators/` module. (#1191)
-- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)
-
-### Fixed
-
-- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
-- Fix missing shutdown processor in otel-collector example. (#1186)
-- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
-
-## [0.11.0] - 2020-08-24
-
-### Added
-
-- Support for exporting array-valued attributes via OTLP. (#992)
-- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
-- Support for filtering metric label sets. (#1047)
-- A dimensionality-reducing metric Processor. (#1057)
-- Integration tests for more OTel Collector Attribute types. (#1062)
-- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)
-
-### Changed
-
-- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
-- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
-- Rename `api/testharness` to `api/apitest`. (#1049)
-- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
-- Change Metric Processor to merge multiple observations. (#1024)
-- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
- This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038)
-- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
-- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042)
-- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
-- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
-- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
-- Unify Callback Function Naming.
- Rename `*Callback` with `*Func`. (#1061)
-- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
-- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface.
- This interface still supports the export of `SpanData`, but only as a slice.
-  Implementations are also required now to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078)
-- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
- If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
-- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
- This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078)
-
-### Removed
-
-- Duplicate, unused API sampler interface. (#999)
- Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
-- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
- This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027)
-- The `WithSpan` method of the `Tracer` interface.
- The functionality this method provided was limited compared to what a user can provide themselves.
- It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
-- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
- These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077)
-- The `oterror` package. (#1026)
-- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
-
-### Fixed
-
-- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
-- Correct instrumentation version tag in Jaeger exporter. (#1037)
-- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
-- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
-- The `otel-collector` example referenced outdated collector processors. (#1006)
-
-## [0.10.0] - 2020-07-29
-
-This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
-
-### Added
-
-- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
-  These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944)
-- Add propagator option for gRPC instrumentation. (#986)
-- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
-
-### Changed
-
-- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
- This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
-- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
- This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
-- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
-- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
- - `value.Bool` was replaced with `kv.BoolValue`.
- - `value.Int64` was replaced with `kv.Int64Value`.
- - `value.Uint64` was replaced with `kv.Uint64Value`.
- - `value.Float64` was replaced with `kv.Float64Value`.
- - `value.Int32` was replaced with `kv.Int32Value`.
- - `value.Uint32` was replaced with `kv.Uint32Value`.
- - `value.Float32` was replaced with `kv.Float32Value`.
- - `value.String` was replaced with `kv.StringValue`.
- - `value.Int` was replaced with `kv.IntValue`.
- - `value.Uint` was replaced with `kv.UintValue`.
- - `value.Array` was replaced with `kv.ArrayValue`.
-- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
-- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
-- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
-- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
-- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
-
-### Removed
-
-- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970)
-
-### Fixed
-
-- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
-- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
-- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
-- Correct Go language formatting in the README documentation. (#961)
-- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
-- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
-- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
-
-## [0.9.0] - 2020-07-20
-
-### Added
-
-- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
-- A Detector to automatically detect resources from an environment variable. (#939)
-- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
-- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
- References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
-
-### Changed
-
-- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
-
-### Removed
-
-- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
-
-## [0.8.0] - 2020-07-09
-
-### Added
-
-- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
- A value for HTTP supported encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
-- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
-- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
-- Add `peer.service` semantic attribute. (#898)
-- Add database-specific semantic attributes. (#899)
-- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
-- Add http content size semantic conventions. (#905)
-- Include `http.request_content_length` in HTTP request basic attributes. (#905)
-- Add semantic conventions for operating system process resource attribute keys. (#919)
-- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
-
-### Changed
-
-- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
-- Use lowercase header names for B3 Multiple Headers. (#881)
-- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
- This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
- If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
-- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
- Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
- This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
-- Extend semantic conventions for RPC. (#900)
-- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
- - `"api/standard".FaaSName` -> `FaaSNameKey`
- - `"api/standard".FaaSID` -> `FaaSIDKey`
- - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
- - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
-
-### Removed
-
-- The `FlagsUnused` trace flag is removed.
- The purpose of this flag was to act as the inverse of `FlagsSampled`, the inverse of `FlagsSampled` is used instead. (#882)
-- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
- If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
-
-### Fixed
-
-- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
-- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
-- The B3 propagator now propagates the debug flag.
- This removes the behavior of changing the debug flag into a set sampling bit.
-  Instead, this now follows the B3 specification and omits the `X-B3-Sampling` header. (#882)
-- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882)
-- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
-- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
-- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
-- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908)
-- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
-- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
-- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
-- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
-- Update otel-collector example to use the v0.5.0 collector. (#915)
-- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
-- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
-- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
- This is in accordance with OpenTelemetry semantic conventions. (#922)
-- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
-- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
-- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
-- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
-
-## [0.7.0] - 2020-06-26
-
-This release implements the v0.5.0 version of the OpenTelemetry specification.
-
-### Added
-
-- The othttp instrumentation now includes default metrics. (#861)
-- This CHANGELOG file to track all changes in the project going forward.
-- Support for array type attributes. (#798)
-- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844)
-- Timestamps are now passed to exporters for each export. (#835)
-- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
- This replaces the prior `Record` `struct` use for this purpose. (#835)
-- New dependabot integration to automate package upgrades. (#814)
-- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument.
- This instrumentation version is passed on to exporters. (#811) (#805) (#802)
-- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
-- Environment variables for Jaeger exporter are supported. (#796)
-- New `aggregation.Kind` in the export metric API. (#808)
-- New example that uses OTLP and the collector. (#790)
-- Handle errors in the span `SetName` during span initialization. (#791)
-- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
-- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
-- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to an user defined `Handler`.
-  There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778)
-- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
-- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
-- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. #769
-
-### Changed
-
-- Rename `Integrator` to `Processor` in the metric SDK. (#863)
-- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
-- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
-- Rename `simple` integrator to `basic` integrator. (#857)
-- Merge otlp collector examples. (#841)
-- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
- With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
-- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
-- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
- All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
- Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
-- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
-- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
-- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. #808
-- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
-- Update error handling for any one off error handlers, replacing, instead, with the `global.Handle` function. (#791)
-- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
-- Makes the argument order to Histogram and DDSketch `New()` consistent. (#781)
-
-### Removed
-
-- `Uint64NumberKind` and related functions from the API. (#864)
-- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
-- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
-
-### Fixed
-
-- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
-- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
-- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
-- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
-- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
-- Bumps github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817)
-- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828)
-- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829)
-- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823)
-- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822)
-- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820)
-- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831)
-- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836)
-- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837)
-- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839)
-- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843)
-- Set span status from HTTP status code in the othttp instrumentation. (#832)
-- Fixed typo in push controller comment. (#834)
-- The `Aggregator` testing has been updated and cleaned. (#812)
-- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
-- Fixed `global` `handler_test.go` test failure. #804
-- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
-- Fixed OTLP example's accidental early close of exporter. (#807)
-- Ensure zipkin exporter reads and closes response body. (#788)
-- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
-- Clean up tools and RELEASING documentation. (#762)
-
-## [0.6.0] - 2020-05-21
-
-### Added
-
-- Support for `Resource`s in the prometheus exporter. (#757)
-- New pull controller. (#751)
-- New `UpDownSumObserver` instrument. (#750)
-- OpenTelemetry collector demo. (#711)
-- New `SumObserver` instrument. (#747)
-- New `UpDownCounter` instrument. (#745)
-- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
-- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731)
-
-### Changed
-
-- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
-- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
-- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
-- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
-- The prometheus exporter now uses the new pull controller. (#751)
-- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`.(#752)
-- Support use of synchronous instruments in asynchronous callbacks (#725)
-- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
-- Rename `Observer` instrument to `ValueObserver`. (#734)
-- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
-- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
-- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
-
-### Fixed
-
-- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
-- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
-- Fix `string` case in `kv` `Infer` function. (#746)
-- Fix panic in grpctrace client interceptors. (#740)
-- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
-- Rewrite span batch process queue batching logic. (#719)
-- Remove the push controller named Meter map. (#738)
-- Fix Histogram aggregator initial state (fix #735). (#736)
-- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
-- Added test for grpctrace `UnaryInterceptorClient`. (#695)
-- Rearrange `api/metric` code layout. (#724)
-
-## [0.5.0] - 2020-05-13
-
-### Added
-
-- Batch `Observer` callback support. (#717)
-- Alias `api` types to root package of project. (#696)
-- Create basic `othttp.Transport` for simple client instrumentation. (#678)
-- `SetAttribute(string, interface{})` to the trace API. (#674)
-- Jaeger exporter option that allows user to specify custom http client. (#671)
-- `Stringer` and `Infer` methods to `key`s. (#662)
-
-### Changed
-
-- Rename `NewKey` in the `kv` package to just `Key`. (#721)
-- Move `core` and `key` to `kv` package. (#720)
-- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
-- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
-- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
-- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
-- Move `Number` from `core` to `api/metric` package. (#706)
-- Move `SpanContext` from `core` to `trace` package. (#692)
-- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
-
-### Fixed
-
-- Update tooling to run generators in all submodules. (#705)
-- gRPC interceptor regexp to match methods without a service name. (#683)
-- Use a `const` for padding 64-bit B3 trace IDs. (#701)
-- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
-- Left-pad 64-bit B3 trace IDs with zero. (#698)
-- Propagate at least the first W3C tracestate header. (#694)
-- Remove internal `StateLocker` implementation. (#688)
-- Increase instance size CI system uses. (#690)
-- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
-- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
-- Reimplement histogram using mutex instead of `StateLocker`. (#669)
-- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
-- Update documentation to not include any references to `WithKeys`. (#672)
-- Correct misspelling. (#668)
-- Fix clobbering of the span context if extraction fails. (#656)
-- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
-
-## [0.4.3] - 2020-04-24
-
-### Added
-
-- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
-- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
-- New `api/label` package, providing common label set implementation. (#651)
-- Support for JSON marshaling of `Resources`. (#654)
-- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
-- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
-- `WithSpanFormatter` option to the othttp plugin. (#617)
-- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612)
-- The prometheus exporter now supports exporting histograms. (#601)
-- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613)
-- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613)
-- An `Equal` method to the `Resource` to test the equivalence of resources. (#613)
-- An iterable structure (`AttributeIterator`) for `Resource` attributes.
-
-### Changed
-
-- The zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
-- Pass `Resources` through the metrics export pipeline. (#659)
-
-### Removed
-
-- `WithKeys` option from the metric API. (#639)
-
-### Fixed
-
-- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
-- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
-- Use type names for return values in jaeger exporter. (#648)
-- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
-- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647)
-- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
-- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
-- Add error wrapping to the prometheus exporter. (#631)
-- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
-- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
-- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
-- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
-- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
-- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. #610
-- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
-
-## [0.4.2] - 2020-03-31
-
-### Fixed
-
-- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
-- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
-
-## [0.4.1] - 2020-03-31
-
-### Fixed
-
-- Update `tag.sh` to create signed tags. (#604)
-
-## [0.4.0] - 2020-03-30
-
-### Added
-
-- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
-- Script to verify examples after a new release. (#579)
-
-### Removed
-
-- The dogstatsd exporter due to lack of support.
- This additionally removes support for statsd. (#591)
-- `LabelSet` from the metric API.
- This is replaced by a `[]core.KeyValue` slice. (#595)
-- `Labels` from the metric API's `Meter` interface. (#595)
-
-### Changed
-
-- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
-- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
-- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
-
-### Fixed
-
-- Corrected missing return in mock span. (#582)
-- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
-- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588)
-- Update pre-release script to be compatible between GNU and BSD based systems. (#592)
-- Add a `RecordBatch` benchmark. (#594)
-- Moved span transforms of the OTLP exporter to the internal package. (#593)
-- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569)
-- Removed unneeded allocation on empty labels in OLTP exporter. (#597)
-- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
-- Update project documentation godoc.org links to pkg.go.dev. (#602)
-
-## [0.3.0] - 2020-03-21
-
-This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality.
-There is still a possibility of breaking changes.
-
-### Added
-
-- Add `Observer` metric instrument. (#474)
-- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
-- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
-- The zipkin trace exporter. (#495)
-- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
-- Add `StatusMessage` field to the trace `Span`. (#524)
-- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
-- The `Resource` type was added to the SDK. (#528)
-- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
-- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
- Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
-- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
-- Scripts to better automate the release process. (#576)
-
-### Changed
-
-- Default to use `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506)
-- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
-- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
-- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
-- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
-- Rename metric API `Options` to `Config`. (#541)
-- Rename metric `Counter` aggregator to be `Sum`. (#541)
-- Unify metric options into `Option` from instrument specific options. (#541)
-- The trace API's `TraceProvider` now support `Resource`s. (#545)
-- Correct error in zipkin module name. (#548)
-- The jaeger trace exporter now supports `Resource`s. (#551)
-- Metric SDK now supports `Resource`s.
- The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
-- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
-- The stdout trace exporter now supports `Resource`s. (#558)
-- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
-- Replace `Ordered` with an iterator in `export.Labels`. (#567)
-
-### Removed
-
-- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452)
-- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
-- `GetDescriptor` from the metric SDK. (#575)
-- The `Gauge` instrument from the metric API. (#537)
-
-### Fixed
-
-- Make histogram aggregator checkpoint consistent. (#438)
-- Update README with import instructions and how to build and test. (#505)
-- The default label encoding was updated to be unique. (#508)
-- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
-- Fix data race in `BatchedSpanProcessor`. (#518)
-- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). #521
-- Use a variable-size array to represent ordered labels in maps. (#523)
-- Update the OTLP protobuf and update changed import path. (#532)
-- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
-- Eliminate goroutine leak in histogram stress test. (#547)
-- Update OTLP exporter with latest protobuf. (#550)
-- Add filters to the othttp plugin. (#556)
-- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
-- Encode labels once during checkpoint.
- The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
- This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
-- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
-
-## [0.2.3] - 2020-03-04
-
-### Added
-
-- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
-- Configurable push frequency for exporters setup pipeline. (#504)
-
-### Changed
-
-- Rename the `exporter` directory to `exporters`.
- The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
- This resulted in all subsequent releases not becoming the default latest.
- A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
- Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
- Consequentially, this action also renames *all* exporter packages. (#502)
-
-### Removed
-
-- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
-
-## [0.2.2] - 2020-02-27
-
-### Added
-
-- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
-- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
-- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier.` (#467)
-- `Config` and configuring `Option` to the propagator API. (#467)
-- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
-- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier.` (#467)
-- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
-- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
-- Histogram aggregator. (#433)
-- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456)
-- `AlwaysParentSample` sampler to the trace API. (#455)
-- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
-
-### Changed
-
-- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
-- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
-- Move correlation context propagation to correlation package. (#479)
-- Do not default to putting remote span context into links. (#480)
-- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
-- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
-- Renamed the `export` package to `metric` to match directory structure. (#432)
-- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
-- Rename the `api/propagators` package to `api/propagation`. (#444)
-- Move the propagators from the `propagators` package into the `trace` API package. (#444)
-- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
-- Moved all dependencies of tools package to a tools directory. (#466)
-
-### Removed
-
-- Binary propagators. (#467)
-- NOOP propagator. (#467)
-
-### Fixed
-
-- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
-- Fix a possible nil-dereference crash (#478)
-- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
-- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
-- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
-- Initialize `onError` based on `Config` in prometheus exporter. (#486)
-- Correct module name in prometheus exporter README. (#475)
-- Removed tracer name prefix from span names. (#430)
-- Fix `aggregator_test.go` import package comment. (#431)
-- Improved detail in stdout exporter. (#436)
-- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
-- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442)
-- Reword function documentation in gRPC plugin. (#446)
-- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
-- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441)
-- Upgraded to Go 1.13 in CI. (#465)
-- Correct opentelemetry.io URL in trace SDK documentation. (#464)
-- Refactored reference counting logic in SDK determination of stale records. (#468)
-- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
-
-## [0.2.1.1] - 2020-01-13
-
-### Fixed
-
-- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428)
-
-## [0.2.1] - 2020-01-08
-
-### Added
-
-- Global meter forwarding implementation.
- This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
-- Global trace forwarding implementation.
- This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
-- Standardize export pipeline creation in all exporters. (#395)
-- A testing, organization, and comments for 64-bit field alignment. (#418)
-- Script to tag all modules in the project. (#414)
-
-### Changed
-
-- Renamed `propagation` package to `propagators`. (#362)
-- Renamed `B3Propagator` propagator to `B3`. (#362)
-- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
-- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
-- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
-- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
-- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
-- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
-- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
-- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
-- `Number` now has a pointer receiver for its methods. (#375)
-- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
-- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
-- Renamed `Message` in Event to `Name` in the trace API. (#389)
-- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385)
-- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
-- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
-- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
-- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
-- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
-- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
-- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
-- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
-- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
-- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
-- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
-
-### Fixed
-
-- Aggregator import path corrected. (#421)
-- Correct links in README. (#368)
-- The README was updated to match latest code changes in its examples. (#374)
-- Don't capitalize error statements. (#375)
-- Fix ignored errors. (#375)
-- Fix ambiguous variable naming. (#375)
-- Removed unnecessary type casting. (#375)
-- Use named parameters. (#375)
-- Updated release schedule. (#378)
-- Correct http-stackdriver example module name. (#394)
-- Removed the `http.request` span in `httptrace` package. (#397)
-- Add comments in the metrics SDK (#399)
-- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
-- Add documentation of compatible exporters in the README. (#405)
-- Typo fix. (#408)
-- Simplify span check logic in SDK tracer implementation. (#419)
-
-## [0.2.0] - 2019-12-03
-
-### Added
-
-- Unary gRPC tracing example. (#351)
-- Prometheus exporter. (#334)
-- Dogstatsd metrics exporter. (#326)
-
-### Changed
-
-- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
-- Rename `GetMeter` to `Meter`. (#357)
-- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
-- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
-- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
-- Move `/global` package to `/api/global`. (#356)
-- Rename `GetTracer` to `Tracer`. (#347)
-
-### Removed
-
-- `SetAttribute` from the `Span` interface in the trace API. (#361)
-- `AddLink` from the `Span` interface in the trace API. (#349)
-- `Link` from the `Span` interface in the trace API. (#349)
-
-### Fixed
-
-- Exclude example directories from coverage report. (#365)
-- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
-- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this environment variable is not needed for that version of Go. (#359)
-- Run the race checker for all tests. (#354)
-- Redundant commands in the Makefile are removed. (#354)
-- Split the `generate` and `lint` targets of the Makefile. (#354)
-- Renames `circle-ci` target to more generic `ci` in Makefile. (#354)
-- Add example Prometheus binary to gitignore. (#358)
-- Support negative numbers with the `MaxSumCount`. (#335)
-- Resolve race conditions in `push_test.go` identified in #339. (#340)
-- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
-- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
- Previously it was testing `AlwaysSample` twice. (#325)
-- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
-- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes` (#325)
-- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
- This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
- This was corrected. (#333)
-
-## [0.1.2] - 2019-11-18
-
-### Fixed
-
-- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
-- Removed unnecessary unslicing of parameters that are already a slice. (#324)
-
-## [0.1.1] - 2019-11-18
-
-This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch.
-
-### Added
-
-- Metrics stdout export pipeline. (#265)
-- Array aggregation for raw measure metrics. (#282)
-- The core.Value now has a `MarshalJSON` method. (#281)
-
-### Removed
-
-- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
-- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
-
-### Changed
-
-- Allocation in LabelSet construction to reduce GC overhead. (#318)
-- `trace.WithAttributes` to append values instead of replacing (#315)
-- Use a formula for tolerance in sampling tests. (#298)
-- Move export types into trace and metric-specific sub-directories. (#289)
-- `SpanKind` back to being based on an `int` type. (#288)
-
-### Fixed
-
-- URL to OpenTelemetry website in README. (#323)
-- Name of othttp default tracer. (#321)
-- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
-- CI modules cache to correctly restore/save from/to the cache. (#316)
-- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
-- README now reflects the new code structure introduced with these changes. (#291)
-- Make the basic example work. (#279)
-
-## [0.1.0] - 2019-11-04
-
-This is the first release of open-telemetry go library.
-It contains api and sdk for trace and meter.
-
-### Added
-
-- Initial OpenTelemetry trace and metric API prototypes.
-- Initial OpenTelemetry trace, metric, and export SDK packages.
-- A wireframe bridge to support compatibility with OpenTracing.
-- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
-- Exporters for Jaeger, Stackdriver, and stdout.
-- Propagators for binary, B3, and trace-context protocols.
-- Project information and guidelines in the form of a README and CONTRIBUTING.
-- Tools to build the project and a Makefile to automate the process.
-- Apache-2.0 license.
-- CircleCI build CI manifest files.
-- CODEOWNERS file to track owners of this project.
-
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...HEAD
-[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0
-[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0
-[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0
-[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
-[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
-[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
-[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
-[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
-[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
-[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0
-[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0
-[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1
-[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
-[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1
-[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
-[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
-[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
-[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
-[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
-[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
-[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0
-[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
-[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1
-[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1
-[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0
-[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2
-[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1
-[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0
-[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0
-[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0
-[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2
-[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
-[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
-[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2
-[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1
-[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0
-[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0
-[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0
-[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0
-[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0
-[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0
-[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3
-[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2
-[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1
-[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0
-[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0
-[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1
-[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0
-[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0
-[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0
-[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0
-[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1
-[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0
-[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0
-[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3
-[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2
-[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0
-[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1
-[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0
-[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0
-[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0
-[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0
-[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0
-[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0
-[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0
-[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0
-[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0
-[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0
-[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0
-[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0
-[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0
-[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0
-[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0
-[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0
-[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3
-[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2
-[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1
-[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0
-[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0
-[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3
-[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2
-[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1
-[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1
-[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0
-[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
-[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
-[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
-
-<!-- Released section ended -->
-
-[Go 1.23]: https://go.dev/doc/go1.23
-[Go 1.22]: https://go.dev/doc/go1.22
-[Go 1.21]: https://go.dev/doc/go1.21
-[Go 1.20]: https://go.dev/doc/go1.20
-[Go 1.19]: https://go.dev/doc/go1.19
-[Go 1.18]: https://go.dev/doc/go1.18
-
-[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
-[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
-[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace
-
-[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
deleted file mode 100644
index 945a07d2b..000000000
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ /dev/null
@@ -1,17 +0,0 @@
-#####################################################
-#
-# List of approvers for this repository
-#
-#####################################################
-#
-# Learn about membership in OpenTelemetry community:
-# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md
-#
-#
-# Learn about CODEOWNERS file format:
-# https://help.github.com/en/articles/about-code-owners
-#
-
-* @MrAlias @XSAM @dashpole @pellared @dmathieu
-
-CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
deleted file mode 100644
index 22a2e9dbd..000000000
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ /dev/null
@@ -1,664 +0,0 @@
-# Contributing to opentelemetry-go
-
-The Go special interest group (SIG) meets regularly. See the
-OpenTelemetry
-[community](https://github.com/open-telemetry/community#golang-sdk)
-repo for information on this and other language SIGs.
-
-See the [public meeting
-notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit)
-for a summary description of past meetings. To request edit access,
-join the meeting or get in touch on
-[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
-
-## Development
-
-You can view and edit the source code by cloning this repository:
-
-```sh
-git clone https://github.com/open-telemetry/opentelemetry-go.git
-```
-
-Run `make test` to run the tests instead of `go test`.
-
-There are some generated files checked into the repo. To make sure
-that the generated files are up-to-date, run `make` (or `make
-precommit` - the `precommit` target is the default).
-
-The `precommit` target also fixes the formatting of the code and
-checks the status of the go module files.
-
-Additionally, there is a `codespell` target that checks for common
-typos in the code. It is not run by default, but you can run it
-manually with `make codespell`. It will set up a virtual environment
-in `venv` and install `codespell` there.
-
-If after running `make precommit` the output of `git status` contains
-`nothing to commit, working tree clean` then it means that everything
-is up-to-date and properly formatted.
-
-## Pull Requests
-
-### How to Send Pull Requests
-
-Everyone is welcome to contribute code to `opentelemetry-go` via
-GitHub pull requests (PRs).
-
-To create a new PR, fork the project in GitHub and clone the upstream
-repo:
-
-```sh
-go get -d go.opentelemetry.io/otel
-```
-
-(This may print some warning about "build constraints exclude all Go
-files", just ignore it.)
-
-This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
-can alternatively use `git` directly with:
-
-```sh
-git clone https://github.com/open-telemetry/opentelemetry-go
-```
-
-(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
-that name is a kind of a redirector to GitHub that `go get` can
-understand, but `git` does not.)
-
-This would put the project in the `opentelemetry-go` directory in
-current working directory.
-
-Enter the newly created directory and add your fork as a new remote:
-
-```sh
-git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
-```
-
-Check out a new branch, make modifications, run linters and tests, update
-`CHANGELOG.md`, and push the branch to your fork:
-
-```sh
-git checkout -b <YOUR_BRANCH_NAME>
-# edit files
-# update changelog
-make precommit
-git add -p
-git commit
-git push <YOUR_FORK> <YOUR_BRANCH_NAME>
-```
-
-Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
-request ID to the entry you added to `CHANGELOG.md`.
-
-Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
-Rewriting Git history makes it difficult to keep track of iterations during code review.
-All pull requests are squashed to a single commit upon merge to `main`.
-
-### How to Receive Comments
-
-* If the PR is not ready for review, please put `[WIP]` in the title,
- tag it as `work-in-progress`, or mark it as
- [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
-* Make sure CLA is signed and CI is clear.
-
-### How to Get PRs Merged
-
-A PR is considered **ready to merge** when:
-
-* It has received two qualified approvals[^1].
-
- This is not enforced through automation, but needs to be validated by the
- maintainer merging.
- * The qualified approvals need to be from [Approver]s/[Maintainer]s
- affiliated with different companies. Two qualified approvals from
- [Approver]s or [Maintainer]s affiliated with the same company counts as a
- single qualified approval.
- * PRs introducing changes that have already been discussed and consensus
- reached only need one qualified approval. The discussion and resolution
- needs to be linked to the PR.
- * Trivial changes[^2] only need one qualified approval.
-
-* All feedback has been addressed.
- * All PR comments and suggestions are resolved.
- * All GitHub Pull Request reviews with a status of "Request changes" have
- been addressed. Another review by the objecting reviewer with a different
- status can be submitted to clear the original review, or the review can be
- dismissed by a [Maintainer] when the issues from the original review have
- been addressed.
- * Any comments or reviews that cannot be resolved between the PR author and
- reviewers can be submitted to the community [Approver]s and [Maintainer]s
- during the weekly SIG meeting. If consensus is reached among the
- [Approver]s and [Maintainer]s during the SIG meeting the objections to the
- PR may be dismissed or resolved or the PR closed by a [Maintainer].
- * Any substantive changes to the PR require existing Approval reviews be
- cleared unless the approver explicitly states that their approval persists
- across changes. This includes changes resulting from other feedback.
- [Approver]s and [Maintainer]s can help in clearing reviews and they should
- be consulted if there are any questions.
-
-* The PR branch is up to date with the base branch it is merging into.
- * To ensure this does not block the PR, it should be configured to allow
- maintainers to update it.
-
-* It has been open for review for at least one working day. This gives people
- reasonable time to review.
- * Trivial changes[^2] do not have to wait for one day and may be merged with
- a single [Maintainer]'s approval.
-
-* All required GitHub workflows have succeeded.
-* Urgent fix can take exception as long as it has been actively communicated
- among [Maintainer]s.
-
-Any [Maintainer] can merge the PR once the above criteria have been met.
-
-[^1]: A qualified approval is a GitHub Pull Request review with "Approve"
- status from an OpenTelemetry Go [Approver] or [Maintainer].
-[^2]: Trivial changes include: typo corrections, cosmetic non-substantive
- changes, documentation corrections or updates, dependency updates, etc.
-
-## Design Choices
-
-As with other OpenTelemetry clients, opentelemetry-go follows the
-[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
-
-It's especially valuable to read through the [library
-guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
-
-### Focus on Capabilities, Not Structure Compliance
-
-OpenTelemetry is an evolving specification, one where the desires and
-use cases are clear, but the method to satisfy those use cases is
-not.
-
-As such, Contributions should provide functionality and behavior that
-conforms to the specification, but the interface and structure is
-flexible.
-
-It is preferable to have contributions follow the idioms of the
-language rather than conform to specific API names or argument
-patterns in the spec.
-
-For a deeper discussion, see
-[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
-
-## Documentation
-
-Each (non-internal, non-test) package must be documented using
-[Go Doc Comments](https://go.dev/doc/comment),
-preferably in a `doc.go` file.
-
-Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
-instead of putting code snippets in Go doc comments.
-In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
-
-You can install and run a "local Go Doc site" in the following way:
-
- ```sh
- go install golang.org/x/pkgsite/cmd/pkgsite@latest
- pkgsite
- ```
-
-[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
-is an example of a very well-documented package.
-
-### README files
-
-Each (non-internal, non-test, non-documentation) package must contain a
-`README.md` file containing at least a title, and a `pkg.go.dev` badge.
-
-The README should not be a repetition of Go doc comments.
-
-You can verify the presence of all README files with the `make verify-readmes`
-command.
-
-## Style Guide
-
-One of the primary goals of this project is that it is actually used by
-developers. With this goal in mind the project strives to build
-user-friendly and idiomatic Go code adhering to the Go community's best
-practices.
-
-For a non-comprehensive but foundational overview of these best practices
-the [Effective Go](https://golang.org/doc/effective_go.html) documentation
-is an excellent starting place.
-
-As a convenience for developers building this project the `make precommit`
-will format, lint, validate, and in some cases fix the changes you plan to
-submit. This check will need to pass for your changes to be able to be
-merged.
-
-In addition to idiomatic Go, the project has adopted certain standards for
-implementations of common patterns. These standards should be followed as a
-default, and if they are not followed documentation needs to be included as
-to the reasons why.
-
-### Configuration
-
-When creating an instantiation function for a complex `type T struct`, it is
-useful to allow variable number of options to be applied. However, the strong
-type system of Go restricts the function design options. There are a few ways
-to solve this problem, but we have landed on the following design.
-
-#### `config`
-
-Configuration should be held in a `struct` named `config`, or prefixed with
-the specific type name this configuration applies to if there are multiple
-`config` in the package. This type must contain configuration options.
-
-```go
-// config contains configuration options for a thing.
-type config struct {
- // options ...
-}
-```
-
-In general the `config` type will not need to be used externally to the
-package and should be unexported. If, however, it is expected that the user
-will likely want to build custom options for the configuration, the `config`
-should be exported. Please, include in the documentation for the `config`
-how the user can extend the configuration.
-
-It is important that internal `config` are not shared across package boundaries.
-Meaning a `config` from one package should not be directly used by another. The
-one exception is the API packages. The configs from the base API, eg.
-`go.opentelemetry.io/otel/trace.TracerConfig` and
-`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
-by the SDK therefore it is expected that these are exported.
-
-When a config is exported we want to maintain forward and backward
-compatibility, to achieve this no fields should be exported but should
-instead be accessed by methods.
-
-Optionally, it is common to include a `newConfig` function (with the same
-naming scheme). This function wraps any defaults setting and looping over
-all options to create a configured `config`.
-
-```go
-// newConfig returns an appropriately configured config.
-func newConfig(options ...Option) config {
- // Set default values for config.
- config := config{/* […] */}
- for _, option := range options {
- config = option.apply(config)
- }
- // Perform any validation here.
- return config
-}
-```
-
-If validation of the `config` options is also performed this can return an
-error as well that is expected to be handled by the instantiation function
-or propagated to the user.
-
-Given the design goal of not having the user need to work with the `config`,
-the `newConfig` function should also be unexported.
-
-#### `Option`
-
-To set the value of the options a `config` contains, a corresponding
-`Option` interface type should be used.
-
-```go
-type Option interface {
- apply(config) config
-}
-```
-
-Having `apply` unexported makes sure that it will not be used externally.
-Moreover, the interface becomes sealed so the user cannot easily implement
-the interface on its own.
-
-The `apply` method should return a modified version of the passed config.
-This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap.
-
-The name of the interface should be prefixed in the same way the
-corresponding `config` is (if at all).
-
-#### Options
-
-All user configurable options for a `config` must have a related unexported
-implementation of the `Option` interface and an exported configuration
-function that wraps this implementation.
-
-The wrapping function name should be prefixed with `With*` (or in the
-special case of a boolean option `Without*`) and should have the following
-function signature.
-
-```go
-func With*(…) Option { … }
-```
-
-##### `bool` Options
-
-```go
-type defaultFalseOption bool
-
-func (o defaultFalseOption) apply(c config) config {
- c.Bool = bool(o)
- return c
-}
-
-// WithOption sets a T to have an option included.
-func WithOption() Option {
- return defaultFalseOption(true)
-}
-```
-
-```go
-type defaultTrueOption bool
-
-func (o defaultTrueOption) apply(c config) config {
- c.Bool = bool(o)
- return c
-}
-
-// WithoutOption sets a T to have Bool option excluded.
-func WithoutOption() Option {
- return defaultTrueOption(false)
-}
-```
-
-##### Declared Type Options
-
-```go
-type myTypeOption struct {
- MyType MyType
-}
-
-func (o myTypeOption) apply(c config) config {
- c.MyType = o.MyType
- return c
-}
-
-// WithMyType sets T to include MyType.
-func WithMyType(t MyType) Option {
- return myTypeOption{t}
-}
-```
-
-##### Functional Options
-
-```go
-type optionFunc func(config) config
-
-func (fn optionFunc) apply(c config) config {
- return fn(c)
-}
-
-// WithMyType sets t as MyType.
-func WithMyType(t MyType) Option {
- return optionFunc(func(c config) config {
- c.MyType = t
- return c
- })
-}
-```
-
-#### Instantiation
-
-Using this configuration pattern to configure instantiation with a `NewT`
-function.
-
-```go
-func NewT(options ...Option) T {…}
-```
-
-Any required parameters can be declared before the variadic `options`.
-
-#### Dealing with Overlap
-
-Sometimes there are multiple complex `struct` that share common
-configuration and also have distinct configuration. To avoid repeated
-portions of `config`s, a common `config` can be used with the union of
-options being handled with the `Option` interface.
-
-For example.
-
-```go
-// config holds options for all animals.
-type config struct {
- Weight float64
- Color string
- MaxAltitude float64
-}
-
-// DogOption apply Dog specific options.
-type DogOption interface {
- applyDog(config) config
-}
-
-// BirdOption apply Bird specific options.
-type BirdOption interface {
- applyBird(config) config
-}
-
-// Option apply options for all animals.
-type Option interface {
- BirdOption
- DogOption
-}
-
-type weightOption float64
-
-func (o weightOption) applyDog(c config) config {
- c.Weight = float64(o)
- return c
-}
-
-func (o weightOption) applyBird(c config) config {
- c.Weight = float64(o)
- return c
-}
-
-func WithWeight(w float64) Option { return weightOption(w) }
-
-type furColorOption string
-
-func (o furColorOption) applyDog(c config) config {
- c.Color = string(o)
- return c
-}
-
-func WithFurColor(c string) DogOption { return furColorOption(c) }
-
-type maxAltitudeOption float64
-
-func (o maxAltitudeOption) applyBird(c config) config {
- c.MaxAltitude = float64(o)
- return c
-}
-
-func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
-
-func NewDog(name string, o ...DogOption) Dog {…}
-func NewBird(name string, o ...BirdOption) Bird {…}
-```
-
-### Interfaces
-
-To allow other developers to better comprehend the code, it is important
-to ensure it is sufficiently documented. One simple measure that contributes
-to this aim is self-documenting by naming method parameters. Therefore,
-where appropriate, methods of every exported interface type should have
-their parameters appropriately named.
-
-#### Interface Stability
-
-All exported stable interfaces that include the following warning in their
-documentation are allowed to be extended with additional methods.
-
-> Warning: methods may be added to this interface in minor releases.
-
-These interfaces are defined by the OpenTelemetry specification and will be
-updated as the specification evolves.
-
-Otherwise, stable interfaces MUST NOT be modified.
-
-#### How to Change Specification Interfaces
-
-When an API change must be made, we will update the SDK with the new method one
-release before the API change. This will allow the SDK one version before the
-API change to work seamlessly with the new API.
-
-If an incompatible version of the SDK is used with the new API the application
-will fail to compile.
-
-#### How Not to Change Specification Interfaces
-
-We have explored using a v2 of the API to change interfaces and found that there
-was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
-Problems happened with libraries that upgraded to v2 when an application did not,
-and would not produce any telemetry.
-
-More detail of the approaches considered and their limitations can be found in
-the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
-issue.
-
-#### How to Change Other Interfaces
-
-If new functionality is needed for an interface that cannot be changed it MUST
-be added by including an additional interface. That added interface can be a
-simple interface for the specific functionality that you want to add or it can
-be a super-set of the original interface. For example, if you wanted to add a
-`Close` method to the `Exporter` interface:
-
-```go
-type Exporter interface {
- Export()
-}
-```
-
-A new interface, `Closer`, can be added:
-
-```go
-type Closer interface {
- Close()
-}
-```
-
-Code that is passed the `Exporter` interface can now check to see if the passed
-value also satisfies the new interface. E.g.
-
-```go
-func caller(e Exporter) {
- /* ... */
- if c, ok := e.(Closer); ok {
- c.Close()
- }
- /* ... */
-}
-```
-
-Alternatively, a new type that is the super-set of an `Exporter` can be created.
-
-```go
-type ClosingExporter struct {
- Exporter
- Close()
-}
-```
-
-This new type can be used similar to the simple interface above in that a
-passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type
-and the `Close` method called.
-
-This super-set approach can be useful if there is explicit behavior that needs
-to be coupled with the original type and passed as a unified type to a new
-function, but, because of this coupling, it also limits the applicability of
-the added functionality. If there exist other interfaces where this
-functionality should be added, each one will need their own super-set
-interfaces and will duplicate the pattern. For this reason, the simple targeted
-interface that defines the specific functionality should be preferred.
-
-See also:
-[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces).
-
-### Testing
-
-The tests should never leak goroutines.
-
-Use the term `ConcurrentSafe` in the test name when it aims to verify the
-absence of race conditions. The top-level tests with this term will be run
-many times in the `test-concurrent-safe` CI job to increase the chance of
-catching concurrency issues. This does not apply to subtests when this term
-is not in their root name.
-
-### Internal packages
-
-The use of internal packages should be scoped to a single module. A sub-module
-should never import from a parent internal package. This creates a coupling
-between the two modules where a user can upgrade the parent without the child
-and if the internal package API has changed it will fail to upgrade[^3].
-
-There are two known exceptions to this rule:
-
-- `go.opentelemetry.io/otel/internal/global`
- - This package manages global state for all of opentelemetry-go. It needs to
- be a single package in order to ensure the uniqueness of the global state.
-- `go.opentelemetry.io/otel/internal/baggage`
- - This package provides values in a `context.Context` that need to be
- recognized by `go.opentelemetry.io/otel/baggage` and
- `go.opentelemetry.io/otel/bridge/opentracing` but remain private.
-
-If you have duplicate code in multiple modules, make that code into a Go
-template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
-to render the templates in the desired locations. See [#4404] for an example of
-this.
-
-[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
-
-### Ignoring context cancellation
-
-OpenTelemetry API implementations need to ignore the cancellation of the context that are
-passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
-Recording methods should not return an error describing the cancellation state of the context
-when they complete, nor should they abort any work.
-
-This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
-the method. In that case the context cancellation can be used for the timeout with the
-restriction that this behavior is documented for the method. Otherwise, timeouts
-are expected to be handled by the user calling the API, not the implementation.
-
-Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method
-of a provider. It is assumed the context passed from a user is not used for this purpose.
-
-Outside of the direct recording of telemetry from the API (e.g. exporting telemetry,
-force flushing telemetry, shutting down a signal provider) the context cancellation
-should be honored. This means all work done on behalf of the user provided context
-should be canceled.
-
-## Approvers and Maintainers
-
-### Triagers
-
-- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
-
-### Approvers
-
-### Maintainers
-
-- [Damien Mathieu](https://github.com/dmathieu), Elastic
-- [David Ashpole](https://github.com/dashpole), Google
-- [Robert Pająk](https://github.com/pellared), Splunk
-- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
-- [Tyler Yahn](https://github.com/MrAlias), Splunk
-
-### Emeritus
-
-- [Aaron Clawson](https://github.com/MadVikingGod)
-- [Anthony Mirabella](https://github.com/Aneurysm9)
-- [Chester Cheung](https://github.com/hanyuancheung)
-- [Evan Torrie](https://github.com/evantorrie)
-- [Gustavo Silva Paiva](https://github.com/paivagustavo)
-- [Josh MacDonald](https://github.com/jmacd)
-- [Liz Fong-Jones](https://github.com/lizthegrey)
-
-### Become an Approver or a Maintainer
-
-See the [community membership document in OpenTelemetry community
-repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md).
-
-[Approver]: #approvers
-[Maintainer]: #maintainers
-[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl
-[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404
diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/go.opentelemetry.io/otel/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
deleted file mode 100644
index a7f6d8cc6..000000000
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ /dev/null
@@ -1,307 +0,0 @@
-# Copyright The OpenTelemetry Authors
-# SPDX-License-Identifier: Apache-2.0
-
-TOOLS_MOD_DIR := ./internal/tools
-
-ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
-ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
-OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
-ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
-
-GO = go
-TIMEOUT = 60
-
-.DEFAULT_GOAL := precommit
-
-.PHONY: precommit ci
-precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default
-ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage
-
-# Tools
-
-TOOLS = $(CURDIR)/.tools
-
-$(TOOLS):
- @mkdir -p $@
-$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS)
- cd $(TOOLS_MOD_DIR) && \
- $(GO) build -o $@ $(PACKAGE)
-
-MULTIMOD = $(TOOLS)/multimod
-$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
-
-SEMCONVGEN = $(TOOLS)/semconvgen
-$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
-
-CROSSLINK = $(TOOLS)/crosslink
-$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
-
-SEMCONVKIT = $(TOOLS)/semconvkit
-$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
-
-GOLANGCI_LINT = $(TOOLS)/golangci-lint
-$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
-
-MISSPELL = $(TOOLS)/misspell
-$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell
-
-GOCOVMERGE = $(TOOLS)/gocovmerge
-$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge
-
-STRINGER = $(TOOLS)/stringer
-$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
-
-PORTO = $(TOOLS)/porto
-$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
-
-GOTMPL = $(TOOLS)/gotmpl
-$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
-
-GORELEASE = $(TOOLS)/gorelease
-$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
-
-GOVULNCHECK = $(TOOLS)/govulncheck
-$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
-
-.PHONY: tools
-tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
-
-# Virtualized python tools via docker
-
-# The directory where the virtual environment is created.
-VENVDIR := venv
-
-# The directory where the python tools are installed.
-PYTOOLS := $(VENVDIR)/bin
-
-# The pip executable in the virtual environment.
-PIP := $(PYTOOLS)/pip
-
-# The directory in the docker image where the current directory is mounted.
-WORKDIR := /workdir
-
-# The python image to use for the virtual environment.
-PYTHONIMAGE := python:3.11.3-slim-bullseye
-
-# Run the python image with the current directory mounted.
-DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
-
-# Create a virtual environment for Python tools.
-$(PYTOOLS):
-# The `--upgrade` flag is needed to ensure that the virtual environment is
-# created with the latest pip version.
- @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
-
-# Install python packages into the virtual environment.
-$(PYTOOLS)/%: $(PYTOOLS)
- @$(DOCKERPY) $(PIP) install -r requirements.txt
-
-CODESPELL = $(PYTOOLS)/codespell
-$(CODESPELL): PACKAGE=codespell
-
-# Generate
-
-.PHONY: generate
-generate: go-generate vanity-import-fix
-
-.PHONY: go-generate
-go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
-go-generate/%: DIR=$*
-go-generate/%: $(STRINGER) $(GOTMPL)
- @echo "$(GO) generate $(DIR)/..." \
- && cd $(DIR) \
- && PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
-
-.PHONY: vanity-import-fix
-vanity-import-fix: $(PORTO)
- @$(PORTO) --include-internal -w .
-
-# Generate go.work file for local development.
-.PHONY: go-work
-go-work: $(CROSSLINK)
- $(CROSSLINK) work --root=$(shell pwd)
-
-# Build
-
-.PHONY: build
-
-build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
-build/%: DIR=$*
-build/%:
- @echo "$(GO) build $(DIR)/..." \
- && cd $(DIR) \
- && $(GO) build ./...
-
-build-tests/%: DIR=$*
-build-tests/%:
- @echo "$(GO) build tests $(DIR)/..." \
- && cd $(DIR) \
- && $(GO) list ./... \
- | grep -v third_party \
- | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
-
-# Tests
-
-TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
-.PHONY: $(TEST_TARGETS) test
-test-default test-race: ARGS=-race
-test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
-test-short: ARGS=-short
-test-verbose: ARGS=-v -race
-test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
-test-concurrent-safe: TIMEOUT=120
-$(TEST_TARGETS): test
-test: $(OTEL_GO_MOD_DIRS:%=test/%)
-test/%: DIR=$*
-test/%:
- @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
- && cd $(DIR) \
- && $(GO) list ./... \
- | grep -v third_party \
- | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
-
-COVERAGE_MODE = atomic
-COVERAGE_PROFILE = coverage.out
-.PHONY: test-coverage
-test-coverage: $(GOCOVMERGE)
- @set -e; \
- printf "" > coverage.txt; \
- for dir in $(ALL_COVERAGE_MOD_DIRS); do \
- echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
- (cd "$${dir}" && \
- $(GO) list ./... \
- | grep -v third_party \
- | grep -v 'semconv/v.*' \
- | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
- $(GO) tool cover -html=coverage.out -o coverage.html); \
- done; \
- $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
-
-.PHONY: benchmark
-benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%)
-benchmark/%:
- @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \
- && cd $* \
- && $(GO) list ./... \
- | grep -v third_party \
- | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=.
-
-.PHONY: golangci-lint golangci-lint-fix
-golangci-lint-fix: ARGS=--fix
-golangci-lint-fix: golangci-lint
-golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
-golangci-lint/%: DIR=$*
-golangci-lint/%: $(GOLANGCI_LINT)
- @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
- && cd $(DIR) \
- && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
-
-.PHONY: crosslink
-crosslink: $(CROSSLINK)
- @echo "Updating intra-repository dependencies in all go modules" \
- && $(CROSSLINK) --root=$(shell pwd) --prune
-
-.PHONY: go-mod-tidy
-go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
-go-mod-tidy/%: DIR=$*
-go-mod-tidy/%: crosslink
- @echo "$(GO) mod tidy in $(DIR)" \
- && cd $(DIR) \
- && $(GO) mod tidy -compat=1.21
-
-.PHONY: lint-modules
-lint-modules: go-mod-tidy
-
-.PHONY: lint
-lint: misspell lint-modules golangci-lint govulncheck
-
-.PHONY: vanity-import-check
-vanity-import-check: $(PORTO)
- @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 )
-
-.PHONY: misspell
-misspell: $(MISSPELL)
- @$(MISSPELL) -w $(ALL_DOCS)
-
-.PHONY: govulncheck
-govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
-govulncheck/%: DIR=$*
-govulncheck/%: $(GOVULNCHECK)
- @echo "govulncheck ./... in $(DIR)" \
- && cd $(DIR) \
- && $(GOVULNCHECK) ./...
-
-.PHONY: codespell
-codespell: $(CODESPELL)
- @$(DOCKERPY) $(CODESPELL)
-
-.PHONY: toolchain-check
-toolchain-check:
- @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \
- awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \
- done); \
- if [ -n "$${toolchainRes}" ]; then \
- echo "toolchain checking failed:"; echo "$${toolchainRes}"; \
- exit 1; \
- fi
-
-.PHONY: license-check
-license-check:
- @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
- awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \
- done); \
- if [ -n "$${licRes}" ]; then \
- echo "license header checking failed:"; echo "$${licRes}"; \
- exit 1; \
- fi
-
-.PHONY: check-clean-work-tree
-check-clean-work-tree:
- @if ! git diff --quiet; then \
- echo; \
- echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
- echo; \
- git status; \
- exit 1; \
- fi
-
-SEMCONVPKG ?= "semconv/"
-.PHONY: semconv-generate
-semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT)
- [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
- [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)"
- $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
-
-.PHONY: gorelease
-gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
-gorelease/%: DIR=$*
-gorelease/%:| $(GORELEASE)
- @echo "gorelease in $(DIR):" \
- && cd $(DIR) \
- && $(GORELEASE) \
- || echo ""
-
-.PHONY: verify-mods
-verify-mods: $(MULTIMOD)
- $(MULTIMOD) verify
-
-.PHONY: prerelease
-prerelease: verify-mods
- @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
- $(MULTIMOD) prerelease -m ${MODSET}
-
-COMMIT ?= "HEAD"
-.PHONY: add-tags
-add-tags: verify-mods
- @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
- $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
-
-.PHONY: lint-markdown
-lint-markdown:
- docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
-
-.PHONY: verify-readmes
-verify-readmes:
- ./verify_readmes.sh
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
deleted file mode 100644
index d9a192076..000000000
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ /dev/null
@@ -1,111 +0,0 @@
-# OpenTelemetry-Go
-
-[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml)
-[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel)
-[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
-[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
-
-OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
-It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
-
-## Project Status
-
-| Signal | Status |
-|---------|--------------------|
-| Traces | Stable |
-| Metrics | Stable |
-| Logs | Beta[^1] |
-
-Progress and status specific to this repository is tracked in our
-[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
-and
-[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
-
-Project versioning information and stability guarantees can be found in the
-[versioning documentation](VERSIONING.md).
-
-[^1]: https://github.com/orgs/open-telemetry/projects/43
-
-### Compatibility
-
-OpenTelemetry-Go ensures compatibility with the current supported versions of
-the [Go language](https://golang.org/doc/devel/release#policy):
-
-> Each major Go release is supported until there are two newer major releases.
-> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
-
-For versions of Go that are no longer supported upstream, opentelemetry-go will
-stop ensuring compatibility with these versions in the following manner:
-
-- A minor release of opentelemetry-go will be made to add support for the new
- supported release of Go.
-- The following minor release of opentelemetry-go will remove compatibility
- testing for the oldest (now archived upstream) version of Go. This, and
- future, releases of opentelemetry-go may include features only supported by
- the currently supported versions of Go.
-
-Currently, this project supports the following environments.
-
-| OS | Go Version | Architecture |
-|----------|------------|--------------|
-| Ubuntu | 1.23 | amd64 |
-| Ubuntu | 1.22 | amd64 |
-| Ubuntu | 1.23 | 386 |
-| Ubuntu | 1.22 | 386 |
-| Linux | 1.23 | arm64 |
-| Linux | 1.22 | arm64 |
-| macOS 13 | 1.23 | amd64 |
-| macOS 13 | 1.22 | amd64 |
-| macOS | 1.23 | arm64 |
-| macOS | 1.22 | arm64 |
-| Windows | 1.23 | amd64 |
-| Windows | 1.22 | amd64 |
-| Windows | 1.23 | 386 |
-| Windows | 1.22 | 386 |
-
-While this project should work for other systems, no compatibility guarantees
-are made for those systems currently.
-
-## Getting Started
-
-You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
-
-OpenTelemetry's goal is to provide a single set of APIs to capture distributed
-traces and metrics from your application and send them to an observability
-platform. This project allows you to do just that for applications written in
-Go. There are two steps to this process: instrument your application, and
-configure an exporter.
-
-### Instrumentation
-
-To start capturing distributed traces and metric events from your application
-it first needs to be instrumented. The easiest way to do this is by using an
-instrumentation library for your code. Be sure to check out [the officially
-supported instrumentation
-libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
-
-If you need to extend the telemetry an instrumentation library provides or want
-to build your own instrumentation for your application directly you will need
-to use the
-[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
-package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples)
-are a good way to see some practical uses of this process.
-
-### Export
-
-Now that your application is instrumented to collect telemetry, it needs an
-export pipeline to send that telemetry to an observability platform.
-
-All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
-
-| Exporter | Logs | Metrics | Traces |
-|---------------------------------------|:----:|:-------:|:------:|
-| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ |
-| [Prometheus](./exporters/prometheus/) | | ✓ | |
-| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ |
-| [Zipkin](./exporters/zipkin/) | | | ✓ |
-
-## Contributing
-
-See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
deleted file mode 100644
index 4ebef4f9d..000000000
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ /dev/null
@@ -1,135 +0,0 @@
-# Release Process
-
-## Semantic Convention Generation
-
-New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
-The `semconv-generate` make target is used for this.
-
-1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
-2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
-3. Run the `make semconv-generate ...` target from this repository.
-
-For example,
-
-```sh
-export TAG="v1.21.0" # Change to the release version you are generating.
-export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
-docker pull otel/semconvgen:latest
-make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
-```
-
-This should create a new sub-package of [`semconv`](./semconv).
-Ensure things look correct before submitting a pull request to include the addition.
-
-## Breaking changes validation
-
-You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
-
-You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
-
-## Verify changes for contrib repository
-
-If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository.
-
-Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes.
-
-## Pre-Release
-
-First, decide which module sets will be released and update their versions
-in `versions.yaml`. Commit this change to a new branch.
-
-Update go.mod for submodules to depend on the new release which will happen in the next step.
-
-1. Run the `prerelease` make target. It creates a branch
- `prerelease_<module set>_<new tag>` that will contain all release changes.
-
- ```
- make prerelease MODSET=<module set>
- ```
-
-2. Verify the changes.
-
- ```
- git diff ...prerelease_<module set>_<new tag>
- ```
-
- This should have changed the version for all modules to be `<new tag>`.
- If these changes look correct, merge them into your pre-release branch:
-
- ```go
- git merge prerelease_<module set>_<new tag>
- ```
-
-3. Update the [Changelog](./CHANGELOG.md).
- - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
- To verify this, you can look directly at the commits since the `<last tag>`.
-
- ```
- git --no-pager log --pretty=oneline "<last tag>..HEAD"
- ```
-
- - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
- - Make sure the new section is under the comment for released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future.
- - Update all the appropriate links at the bottom.
-
-4. Push the changes to upstream and create a Pull Request on GitHub.
- Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
-
-## Tag
-
-Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
-
-***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
-Failure to do so will leave things in a broken state. As long as you do not
-change `versions.yaml` between pre-release and this step, things should be fine.
-
-***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
-It is critical you make sure the version you push upstream is correct.
-[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
-
-1. For each module set that will be released, run the `add-tags` make target
- using the `<commit-hash>` of the commit on the main branch for the merged Pull Request.
-
- ```
- make add-tags MODSET=<module set> COMMIT=<commit hash>
- ```
-
- It should only be necessary to provide an explicit `COMMIT` value if the
- current `HEAD` of your working directory is not the correct commit.
-
-2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
- Make sure you push all sub-modules as well.
-
- ```
- git push upstream <new tag>
- git push upstream <submodules-path/new tag>
- ...
- ```
-
-## Release
-
-Finally create a Release for the new `<new tag>` on GitHub.
-The release body should include all the release notes from the Changelog for this release.
-
-## Post-Release
-
-### Contrib Repository
-
-Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
-
-### Website Documentation
-
-Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
-Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
-
-[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
-[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
-[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
-
-### Demo Repository
-
-Bump the dependencies in the following Go services:
-
-- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
-- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
-- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
deleted file mode 100644
index b8cb605c1..000000000
--- a/vendor/go.opentelemetry.io/otel/VERSIONING.md
+++ /dev/null
@@ -1,224 +0,0 @@
-# Versioning
-
-This document describes the versioning policy for this repository. This policy
-is designed so the following goals can be achieved.
-
-**Users are provided a codebase of value that is stable and secure.**
-
-## Policy
-
-* Versioning of this project will be idiomatic of a Go project using [Go
- modules](https://github.com/golang/go/wiki/Modules).
- * [Semantic import
- versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
- will be used.
- * Versions will comply with [semver
- 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
- * New methods may be added to exported API interfaces. All exported
- interfaces that fall within this exception will include the following
- paragraph in their public documentation.
-
- > Warning: methods may be added to this interface in minor releases.
-
- * If a module is version `v2` or higher, the major version of the module
- must be included as a `/vN` at the end of the module paths used in
- `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
- go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
- (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
- paths used in `go get` commands (e.g., `go get
- go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a
- `@v2.0.1` in that example. One way to think about it is that the module
- name now includes the `/v2`, so include `/v2` whenever you are using the
- module name).
- * If a module is version `v0` or `v1`, do not include the major version in
- either the module path or the import path.
- * Modules will be used to encapsulate signals and components.
- * Experimental modules still under active development will be versioned at
- `v0` to imply the stability guarantee defined by
- [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
-
- > Major version zero (0.y.z) is for initial development. Anything MAY
- > change at any time. The public API SHOULD NOT be considered stable.
-
- * Mature modules for which we guarantee a stable public API will be versioned
- with a major version greater than `v0`.
- * The decision to make a module stable will be made on a case-by-case
- basis by the maintainers of this project.
- * Experimental modules will start their versioning at `v0.0.0` and will
- increment their minor version when backwards incompatible changes are
- released and increment their patch version when backwards compatible
- changes are released.
- * All stable modules that use the same major version number will use the
- same entire version number.
- * Stable modules may be released with an incremented minor or patch
- version even though that module has not been changed, but rather so
- that it will remain at the same version as other stable modules that
- did undergo change.
- * When an experimental module becomes stable a new stable module version
- will be released and will include this now stable module. The new
- stable module version will be an increment of the minor version number
- and will be applied to all existing stable modules as well as the newly
- stable module being released.
-* Versioning of the associated [contrib
- repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of
- this project will be idiomatic of a Go project using [Go
- modules](https://github.com/golang/go/wiki/Modules).
- * [Semantic import
- versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
- will be used.
- * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
- * If a module is version `v2` or higher, the
- major version of the module must be included as a `/vN` at the end of the
- module paths used in `go.mod` files (e.g., `module
- go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
- go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
- package import path (e.g., `import
- "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
- the paths used in `go get` commands (e.g., `go get
- go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
- is both a `/v2` and a `@v2.0.1` in that example. One way to think about
- it is that the module name now includes the `/v2`, so include `/v2`
- whenever you are using the module name).
- * If a module is version `v0` or `v1`, do not include the major version
- in either the module path or the import path.
- * In addition to public APIs, telemetry produced by stable instrumentation
- will remain stable and backwards compatible. This is to avoid breaking
- alerts and dashboard.
- * Modules will be used to encapsulate instrumentation, detectors, exporters,
- propagators, and any other independent sets of related components.
- * Experimental modules still under active development will be versioned at
- `v0` to imply the stability guarantee defined by
- [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
-
- > Major version zero (0.y.z) is for initial development. Anything MAY
- > change at any time. The public API SHOULD NOT be considered stable.
-
- * Mature modules for which we guarantee a stable public API and telemetry will
- be versioned with a major version greater than `v0`.
- * Experimental modules will start their versioning at `v0.0.0` and will
- increment their minor version when backwards incompatible changes are
- released and increment their patch version when backwards compatible
- changes are released.
- * Stable contrib modules cannot depend on experimental modules from this
- project.
- * All stable contrib modules of the same major version with this project
- will use the same entire version as this project.
- * Stable modules may be released with an incremented minor or patch
- version even though that module's code has not been changed. Instead
- the only change that will have been included is to have updated that
- modules dependency on this project's stable APIs.
- * When an experimental module in contrib becomes stable a new stable
- module version will be released and will include this now stable
- module. The new stable module version will be an increment of the minor
- version number and will be applied to all existing stable contrib
- modules, this project's modules, and the newly stable module being
- released.
- * Contrib modules will be kept up to date with this project's releases.
- * Due to the dependency contrib modules will implicitly have on this
- project's modules the release of stable contrib modules to match the
- released version number will be staggered after this project's release.
- There is no explicit time guarantee for how long after this projects
- release the contrib release will be. Effort should be made to keep them
- as close in time as possible.
- * No additional stable release in this project can be made until the
- contrib repository has a matching stable release.
- * No release can be made in the contrib repository after this project's
- stable release except for a stable release of the contrib repository.
-* GitHub releases will be made for all releases.
-* Go modules will be made available at Go package mirrors.
-
-## Example Versioning Lifecycle
-
-To better understand the implementation of the above policy the following
-example is provided. This project is simplified to include only the following
-modules and their versions:
-
-* `otel`: `v0.14.0`
-* `otel/trace`: `v0.14.0`
-* `otel/metric`: `v0.14.0`
-* `otel/baggage`: `v0.14.0`
-* `otel/sdk/trace`: `v0.14.0`
-* `otel/sdk/metric`: `v0.14.0`
-
-These modules have been developed to a point where the `otel/trace`,
-`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they
-should be considered for a stable release. The `otel/metric` and
-`otel/sdk/metric` are still under active development and the `otel` module
-depends on both `otel/trace` and `otel/metric`.
-
-The `otel` package is refactored to remove its dependencies on `otel/metric` so
-it can be released as stable as well. With that done the following release
-candidates are made:
-
-* `otel`: `v1.0.0-RC1`
-* `otel/trace`: `v1.0.0-RC1`
-* `otel/baggage`: `v1.0.0-RC1`
-* `otel/sdk/trace`: `v1.0.0-RC1`
-
-The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
-
-A few minor issues are discovered in the `otel/trace` package. These issues are
-resolved with some minor, but backwards incompatible, changes and are released
-as a second release candidate:
-
-* `otel`: `v1.0.0-RC2`
-* `otel/trace`: `v1.0.0-RC2`
-* `otel/baggage`: `v1.0.0-RC2`
-* `otel/sdk/trace`: `v1.0.0-RC2`
-
-Notice that all module version numbers are incremented to adhere to our
-versioning policy.
-
-After these release candidates have been evaluated to satisfaction, they are
-released as version `v1.0.0`.
-
-* `otel`: `v1.0.0`
-* `otel/trace`: `v1.0.0`
-* `otel/baggage`: `v1.0.0`
-* `otel/sdk/trace`: `v1.0.0`
-
-Since both the `go` utility and the Go module system support [the semantic
-versioning definition of
-precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
-will correctly be interpreted as the successor to the previous release
-candidates.
-
-Active development of this project continues. The `otel/metric` module now has
-backwards incompatible changes to its API that need to be released and the
-`otel/baggage` module has a minor bug fix that needs to be released. The
-following release is made:
-
-* `otel`: `v1.0.1`
-* `otel/trace`: `v1.0.1`
-* `otel/metric`: `v0.15.0`
-* `otel/baggage`: `v1.0.1`
-* `otel/sdk/trace`: `v1.0.1`
-* `otel/sdk/metric`: `v0.15.0`
-
-Notice that, again, all stable module versions are incremented in unison and
-the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
-bumped its version. This bump of the `otel/sdk/metric` package makes sense
-given their coupling, though it is not explicitly required by our versioning
-policy.
-
-As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
-point where they should be evaluated for stability. The `otel` module is
-reintegrated with the `otel/metric` package and the following release is made:
-
-* `otel`: `v1.1.0-RC1`
-* `otel/trace`: `v1.1.0-RC1`
-* `otel/metric`: `v1.1.0-RC1`
-* `otel/baggage`: `v1.1.0-RC1`
-* `otel/sdk/trace`: `v1.1.0-RC1`
-* `otel/sdk/metric`: `v1.1.0-RC1`
-
-All the modules are evaluated and determined to a viable stable release. They
-are then released as version `v1.1.0` (the minor version is incremented to
-indicate the addition of new signal).
-
-* `otel`: `v1.1.0`
-* `otel/trace`: `v1.1.0`
-* `otel/metric`: `v1.1.0`
-* `otel/baggage`: `v1.1.0`
-* `otel/sdk/trace`: `v1.1.0`
-* `otel/sdk/metric`: `v1.1.0`
diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md
deleted file mode 100644
index 5b3da8f14..000000000
--- a/vendor/go.opentelemetry.io/otel/attribute/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Attribute
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute)
diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
deleted file mode 100644
index eef51ebc2..000000000
--- a/vendor/go.opentelemetry.io/otel/attribute/doc.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package attribute provides key and value attributes.
-package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
deleted file mode 100644
index 318e42fca..000000000
--- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-import (
- "bytes"
- "sync"
- "sync/atomic"
-)
-
-type (
- // Encoder is a mechanism for serializing an attribute set into a specific
- // string representation that supports caching, to avoid repeated
- // serialization. An example could be an exporter encoding the attribute
- // set into a wire representation.
- Encoder interface {
- // Encode returns the serialized encoding of the attribute set using
- // its Iterator. This result may be cached by a attribute.Set.
- Encode(iterator Iterator) string
-
- // ID returns a value that is unique for each class of attribute
- // encoder. Attribute encoders allocate these using `NewEncoderID`.
- ID() EncoderID
- }
-
- // EncoderID is used to identify distinct Encoder
- // implementations, for caching encoded results.
- EncoderID struct {
- value uint64
- }
-
- // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
- // allocations used in encoding attributes. This implementation encodes a
- // comma-separated list of key=value, with '/'-escaping of '=', ',', and
- // '\'.
- defaultAttrEncoder struct {
- // pool is a pool of attribute set builders. The buffers in this pool
- // grow to a size that most attribute encodings will not allocate new
- // memory.
- pool sync.Pool // *bytes.Buffer
- }
-)
-
-// escapeChar is used to ensure uniqueness of the attribute encoding where
-// keys or values contain either '=' or ','. Since there is no parser needed
-// for this encoding and its only requirement is to be unique, this choice is
-// arbitrary. Users will see these in some exporters (e.g., stdout), so the
-// backslash ('\') is used as a conventional choice.
-const escapeChar = '\\'
-
-var (
- _ Encoder = &defaultAttrEncoder{}
-
- // encoderIDCounter is for generating IDs for other attribute encoders.
- encoderIDCounter uint64
-
- defaultEncoderOnce sync.Once
- defaultEncoderID = NewEncoderID()
- defaultEncoderInstance *defaultAttrEncoder
-)
-
-// NewEncoderID returns a unique attribute encoder ID. It should be called
-// once per each type of attribute encoder. Preferably in init() or in var
-// definition.
-func NewEncoderID() EncoderID {
- return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
-}
-
-// DefaultEncoder returns an attribute encoder that encodes attributes in such
-// a way that each escaped attribute's key is followed by an equal sign and
-// then by an escaped attribute's value. All key-value pairs are separated by
-// a comma.
-//
-// Escaping is done by prepending a backslash before either a backslash, equal
-// sign or a comma.
-func DefaultEncoder() Encoder {
- defaultEncoderOnce.Do(func() {
- defaultEncoderInstance = &defaultAttrEncoder{
- pool: sync.Pool{
- New: func() interface{} {
- return &bytes.Buffer{}
- },
- },
- }
- })
- return defaultEncoderInstance
-}
-
-// Encode is a part of an implementation of the AttributeEncoder interface.
-func (d *defaultAttrEncoder) Encode(iter Iterator) string {
- buf := d.pool.Get().(*bytes.Buffer)
- defer d.pool.Put(buf)
- buf.Reset()
-
- for iter.Next() {
- i, keyValue := iter.IndexedAttribute()
- if i > 0 {
- _, _ = buf.WriteRune(',')
- }
- copyAndEscape(buf, string(keyValue.Key))
-
- _, _ = buf.WriteRune('=')
-
- if keyValue.Value.Type() == STRING {
- copyAndEscape(buf, keyValue.Value.AsString())
- } else {
- _, _ = buf.WriteString(keyValue.Value.Emit())
- }
- }
- return buf.String()
-}
-
-// ID is a part of an implementation of the AttributeEncoder interface.
-func (*defaultAttrEncoder) ID() EncoderID {
- return defaultEncoderID
-}
-
-// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
-// making the default encoding unique.
-func copyAndEscape(buf *bytes.Buffer, val string) {
- for _, ch := range val {
- switch ch {
- case '=', ',', escapeChar:
- _, _ = buf.WriteRune(escapeChar)
- }
- _, _ = buf.WriteRune(ch)
- }
-}
-
-// Valid returns true if this encoder ID was allocated by
-// `NewEncoderID`. Invalid encoder IDs will not be cached.
-func (id EncoderID) Valid() bool {
- return id.value != 0
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
deleted file mode 100644
index be9cd922d..000000000
--- a/vendor/go.opentelemetry.io/otel/attribute/filter.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-// Filter supports removing certain attributes from attribute sets. When
-// the filter returns true, the attribute will be kept in the filtered
-// attribute set. When the filter returns false, the attribute is excluded
-// from the filtered attribute set, and the attribute instead appears in
-// the removed list of excluded attributes.
-type Filter func(KeyValue) bool
-
-// NewAllowKeysFilter returns a Filter that only allows attributes with one of
-// the provided keys.
-//
-// If keys is empty a deny-all filter is returned.
-func NewAllowKeysFilter(keys ...Key) Filter {
- if len(keys) <= 0 {
- return func(kv KeyValue) bool { return false }
- }
-
- allowed := make(map[Key]struct{})
- for _, k := range keys {
- allowed[k] = struct{}{}
- }
- return func(kv KeyValue) bool {
- _, ok := allowed[kv.Key]
- return ok
- }
-}
-
-// NewDenyKeysFilter returns a Filter that only allows attributes
-// that do not have one of the provided keys.
-//
-// If keys is empty an allow-all filter is returned.
-func NewDenyKeysFilter(keys ...Key) Filter {
- if len(keys) <= 0 {
- return func(kv KeyValue) bool { return true }
- }
-
- forbid := make(map[Key]struct{})
- for _, k := range keys {
- forbid[k] = struct{}{}
- }
- return func(kv KeyValue) bool {
- _, ok := forbid[kv.Key]
- return !ok
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
deleted file mode 100644
index f2ba89ce4..000000000
--- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-// Iterator allows iterating over the set of attributes in order, sorted by
-// key.
-type Iterator struct {
- storage *Set
- idx int
-}
-
-// MergeIterator supports iterating over two sets of attributes while
-// eliminating duplicate values from the combined set. The first iterator
-// value takes precedence.
-type MergeIterator struct {
- one oneIterator
- two oneIterator
- current KeyValue
-}
-
-type oneIterator struct {
- iter Iterator
- done bool
- attr KeyValue
-}
-
-// Next moves the iterator to the next position. Returns false if there are no
-// more attributes.
-func (i *Iterator) Next() bool {
- i.idx++
- return i.idx < i.Len()
-}
-
-// Label returns current KeyValue. Must be called only after Next returns
-// true.
-//
-// Deprecated: Use Attribute instead.
-func (i *Iterator) Label() KeyValue {
- return i.Attribute()
-}
-
-// Attribute returns the current KeyValue of the Iterator. It must be called
-// only after Next returns true.
-func (i *Iterator) Attribute() KeyValue {
- kv, _ := i.storage.Get(i.idx)
- return kv
-}
-
-// IndexedLabel returns current index and attribute. Must be called only
-// after Next returns true.
-//
-// Deprecated: Use IndexedAttribute instead.
-func (i *Iterator) IndexedLabel() (int, KeyValue) {
- return i.idx, i.Attribute()
-}
-
-// IndexedAttribute returns current index and attribute. Must be called only
-// after Next returns true.
-func (i *Iterator) IndexedAttribute() (int, KeyValue) {
- return i.idx, i.Attribute()
-}
-
-// Len returns a number of attributes in the iterated set.
-func (i *Iterator) Len() int {
- return i.storage.Len()
-}
-
-// ToSlice is a convenience function that creates a slice of attributes from
-// the passed iterator. The iterator is set up to start from the beginning
-// before creating the slice.
-func (i *Iterator) ToSlice() []KeyValue {
- l := i.Len()
- if l == 0 {
- return nil
- }
- i.idx = -1
- slice := make([]KeyValue, 0, l)
- for i.Next() {
- slice = append(slice, i.Attribute())
- }
- return slice
-}
-
-// NewMergeIterator returns a MergeIterator for merging two attribute sets.
-// Duplicates are resolved by taking the value from the first set.
-func NewMergeIterator(s1, s2 *Set) MergeIterator {
- mi := MergeIterator{
- one: makeOne(s1.Iter()),
- two: makeOne(s2.Iter()),
- }
- return mi
-}
-
-func makeOne(iter Iterator) oneIterator {
- oi := oneIterator{
- iter: iter,
- }
- oi.advance()
- return oi
-}
-
-func (oi *oneIterator) advance() {
- if oi.done = !oi.iter.Next(); !oi.done {
- oi.attr = oi.iter.Attribute()
- }
-}
-
-// Next returns true if there is another attribute available.
-func (m *MergeIterator) Next() bool {
- if m.one.done && m.two.done {
- return false
- }
- if m.one.done {
- m.current = m.two.attr
- m.two.advance()
- return true
- }
- if m.two.done {
- m.current = m.one.attr
- m.one.advance()
- return true
- }
- if m.one.attr.Key == m.two.attr.Key {
- m.current = m.one.attr // first iterator attribute value wins
- m.one.advance()
- m.two.advance()
- return true
- }
- if m.one.attr.Key < m.two.attr.Key {
- m.current = m.one.attr
- m.one.advance()
- return true
- }
- m.current = m.two.attr
- m.two.advance()
- return true
-}
-
-// Label returns the current value after Next() returns true.
-//
-// Deprecated: Use Attribute instead.
-func (m *MergeIterator) Label() KeyValue {
- return m.current
-}
-
-// Attribute returns the current value after Next() returns true.
-func (m *MergeIterator) Attribute() KeyValue {
- return m.current
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go
deleted file mode 100644
index d9a22c650..000000000
--- a/vendor/go.opentelemetry.io/otel/attribute/key.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-// Key represents the key part in key-value pairs. It's a string. The
-// allowed character set in the key depends on the use of the key.
-type Key string
-
-// Bool creates a KeyValue instance with a BOOL Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- Bool(name, value).
-func (k Key) Bool(v bool) KeyValue {
- return KeyValue{
- Key: k,
- Value: BoolValue(v),
- }
-}
-
-// BoolSlice creates a KeyValue instance with a BOOLSLICE Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- BoolSlice(name, value).
-func (k Key) BoolSlice(v []bool) KeyValue {
- return KeyValue{
- Key: k,
- Value: BoolSliceValue(v),
- }
-}
-
-// Int creates a KeyValue instance with an INT64 Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- Int(name, value).
-func (k Key) Int(v int) KeyValue {
- return KeyValue{
- Key: k,
- Value: IntValue(v),
- }
-}
-
-// IntSlice creates a KeyValue instance with an INT64SLICE Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- IntSlice(name, value).
-func (k Key) IntSlice(v []int) KeyValue {
- return KeyValue{
- Key: k,
- Value: IntSliceValue(v),
- }
-}
-
-// Int64 creates a KeyValue instance with an INT64 Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- Int64(name, value).
-func (k Key) Int64(v int64) KeyValue {
- return KeyValue{
- Key: k,
- Value: Int64Value(v),
- }
-}
-
-// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- Int64Slice(name, value).
-func (k Key) Int64Slice(v []int64) KeyValue {
- return KeyValue{
- Key: k,
- Value: Int64SliceValue(v),
- }
-}
-
-// Float64 creates a KeyValue instance with a FLOAT64 Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- Float64(name, value).
-func (k Key) Float64(v float64) KeyValue {
- return KeyValue{
- Key: k,
- Value: Float64Value(v),
- }
-}
-
-// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- Float64(name, value).
-func (k Key) Float64Slice(v []float64) KeyValue {
- return KeyValue{
- Key: k,
- Value: Float64SliceValue(v),
- }
-}
-
-// String creates a KeyValue instance with a STRING Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- String(name, value).
-func (k Key) String(v string) KeyValue {
- return KeyValue{
- Key: k,
- Value: StringValue(v),
- }
-}
-
-// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- StringSlice(name, value).
-func (k Key) StringSlice(v []string) KeyValue {
- return KeyValue{
- Key: k,
- Value: StringSliceValue(v),
- }
-}
-
-// Defined returns true for non-empty keys.
-func (k Key) Defined() bool {
- return len(k) != 0
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
deleted file mode 100644
index 3028f9a40..000000000
--- a/vendor/go.opentelemetry.io/otel/attribute/kv.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-import (
- "fmt"
-)
-
-// KeyValue holds a key and value pair.
-type KeyValue struct {
- Key Key
- Value Value
-}
-
-// Valid returns if kv is a valid OpenTelemetry attribute.
-func (kv KeyValue) Valid() bool {
- return kv.Key.Defined() && kv.Value.Type() != INVALID
-}
-
-// Bool creates a KeyValue with a BOOL Value type.
-func Bool(k string, v bool) KeyValue {
- return Key(k).Bool(v)
-}
-
-// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
-func BoolSlice(k string, v []bool) KeyValue {
- return Key(k).BoolSlice(v)
-}
-
-// Int creates a KeyValue with an INT64 Value type.
-func Int(k string, v int) KeyValue {
- return Key(k).Int(v)
-}
-
-// IntSlice creates a KeyValue with an INT64SLICE Value type.
-func IntSlice(k string, v []int) KeyValue {
- return Key(k).IntSlice(v)
-}
-
-// Int64 creates a KeyValue with an INT64 Value type.
-func Int64(k string, v int64) KeyValue {
- return Key(k).Int64(v)
-}
-
-// Int64Slice creates a KeyValue with an INT64SLICE Value type.
-func Int64Slice(k string, v []int64) KeyValue {
- return Key(k).Int64Slice(v)
-}
-
-// Float64 creates a KeyValue with a FLOAT64 Value type.
-func Float64(k string, v float64) KeyValue {
- return Key(k).Float64(v)
-}
-
-// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
-func Float64Slice(k string, v []float64) KeyValue {
- return Key(k).Float64Slice(v)
-}
-
-// String creates a KeyValue with a STRING Value type.
-func String(k, v string) KeyValue {
- return Key(k).String(v)
-}
-
-// StringSlice creates a KeyValue with a STRINGSLICE Value type.
-func StringSlice(k string, v []string) KeyValue {
- return Key(k).StringSlice(v)
-}
-
-// Stringer creates a new key-value pair with a passed name and a string
-// value generated by the passed Stringer interface.
-func Stringer(k string, v fmt.Stringer) KeyValue {
- return Key(k).String(v.String())
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
deleted file mode 100644
index 6cbefcead..000000000
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ /dev/null
@@ -1,411 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-import (
- "cmp"
- "encoding/json"
- "reflect"
- "slices"
- "sort"
-)
-
-type (
- // Set is the representation for a distinct attribute set. It manages an
- // immutable set of attributes, with an internal cache for storing
- // attribute encodings.
- //
- // This type will remain comparable for backwards compatibility. The
- // equivalence of Sets across versions is not guaranteed to be stable.
- // Prior versions may find two Sets to be equal or not when compared
- // directly (i.e. ==), but subsequent versions may not. Users should use
- // the Equals method to ensure stable equivalence checking.
- //
- // Users should also use the Distinct returned from Equivalent as a map key
- // instead of a Set directly. In addition to that type providing guarantees
- // on stable equivalence, it may also provide performance improvements.
- Set struct {
- equivalent Distinct
- }
-
- // Distinct is a unique identifier of a Set.
- //
- // Distinct is designed to be ensures equivalence stability: comparisons
- // will return the save value across versions. For this reason, Distinct
- // should always be used as a map key instead of a Set.
- Distinct struct {
- iface interface{}
- }
-
- // Sortable implements sort.Interface, used for sorting KeyValue.
- //
- // Deprecated: This type is no longer used. It was added as a performance
- // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no
- // longer supported by the module).
- Sortable []KeyValue
-)
-
-var (
- // keyValueType is used in computeDistinctReflect.
- keyValueType = reflect.TypeOf(KeyValue{})
-
- // emptySet is returned for empty attribute sets.
- emptySet = &Set{
- equivalent: Distinct{
- iface: [0]KeyValue{},
- },
- }
-)
-
-// EmptySet returns a reference to a Set with no elements.
-//
-// This is a convenience provided for optimized calling utility.
-func EmptySet() *Set {
- return emptySet
-}
-
-// reflectValue abbreviates reflect.ValueOf(d).
-func (d Distinct) reflectValue() reflect.Value {
- return reflect.ValueOf(d.iface)
-}
-
-// Valid returns true if this value refers to a valid Set.
-func (d Distinct) Valid() bool {
- return d.iface != nil
-}
-
-// Len returns the number of attributes in this set.
-func (l *Set) Len() int {
- if l == nil || !l.equivalent.Valid() {
- return 0
- }
- return l.equivalent.reflectValue().Len()
-}
-
-// Get returns the KeyValue at ordered position idx in this set.
-func (l *Set) Get(idx int) (KeyValue, bool) {
- if l == nil || !l.equivalent.Valid() {
- return KeyValue{}, false
- }
- value := l.equivalent.reflectValue()
-
- if idx >= 0 && idx < value.Len() {
- // Note: The Go compiler successfully avoids an allocation for
- // the interface{} conversion here:
- return value.Index(idx).Interface().(KeyValue), true
- }
-
- return KeyValue{}, false
-}
-
-// Value returns the value of a specified key in this set.
-func (l *Set) Value(k Key) (Value, bool) {
- if l == nil || !l.equivalent.Valid() {
- return Value{}, false
- }
- rValue := l.equivalent.reflectValue()
- vlen := rValue.Len()
-
- idx := sort.Search(vlen, func(idx int) bool {
- return rValue.Index(idx).Interface().(KeyValue).Key >= k
- })
- if idx >= vlen {
- return Value{}, false
- }
- keyValue := rValue.Index(idx).Interface().(KeyValue)
- if k == keyValue.Key {
- return keyValue.Value, true
- }
- return Value{}, false
-}
-
-// HasValue tests whether a key is defined in this set.
-func (l *Set) HasValue(k Key) bool {
- if l == nil {
- return false
- }
- _, ok := l.Value(k)
- return ok
-}
-
-// Iter returns an iterator for visiting the attributes in this set.
-func (l *Set) Iter() Iterator {
- return Iterator{
- storage: l,
- idx: -1,
- }
-}
-
-// ToSlice returns the set of attributes belonging to this set, sorted, where
-// keys appear no more than once.
-func (l *Set) ToSlice() []KeyValue {
- iter := l.Iter()
- return iter.ToSlice()
-}
-
-// Equivalent returns a value that may be used as a map key. The Distinct type
-// guarantees that the result will equal the equivalent. Distinct value of any
-// attribute set with the same elements as this, where sets are made unique by
-// choosing the last value in the input for any given key.
-func (l *Set) Equivalent() Distinct {
- if l == nil || !l.equivalent.Valid() {
- return emptySet.equivalent
- }
- return l.equivalent
-}
-
-// Equals returns true if the argument set is equivalent to this set.
-func (l *Set) Equals(o *Set) bool {
- return l.Equivalent() == o.Equivalent()
-}
-
-// Encoded returns the encoded form of this set, according to encoder.
-func (l *Set) Encoded(encoder Encoder) string {
- if l == nil || encoder == nil {
- return ""
- }
-
- return encoder.Encode(l.Iter())
-}
-
-func empty() Set {
- return Set{
- equivalent: emptySet.equivalent,
- }
-}
-
-// NewSet returns a new Set. See the documentation for
-// NewSetWithSortableFiltered for more details.
-//
-// Except for empty sets, this method adds an additional allocation compared
-// with calls that include a Sortable.
-func NewSet(kvs ...KeyValue) Set {
- s, _ := NewSetWithFiltered(kvs, nil)
- return s
-}
-
-// NewSetWithSortable returns a new Set. See the documentation for
-// NewSetWithSortableFiltered for more details.
-//
-// This call includes a Sortable option as a memory optimization.
-//
-// Deprecated: Use [NewSet] instead.
-func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
- s, _ := NewSetWithFiltered(kvs, nil)
- return s
-}
-
-// NewSetWithFiltered returns a new Set. See the documentation for
-// NewSetWithSortableFiltered for more details.
-//
-// This call includes a Filter to include/exclude attribute keys from the
-// return value. Excluded keys are returned as a slice of attribute values.
-func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty(), nil
- }
-
- // Stable sort so the following de-duplication can implement
- // last-value-wins semantics.
- slices.SortStableFunc(kvs, func(a, b KeyValue) int {
- return cmp.Compare(a.Key, b.Key)
- })
-
- position := len(kvs) - 1
- offset := position - 1
-
- // The requirements stated above require that the stable
- // result be placed in the end of the input slice, while
- // overwritten values are swapped to the beginning.
- //
- // De-duplicate with last-value-wins semantics. Preserve
- // duplicate values at the beginning of the input slice.
- for ; offset >= 0; offset-- {
- if kvs[offset].Key == kvs[position].Key {
- continue
- }
- position--
- kvs[offset], kvs[position] = kvs[position], kvs[offset]
- }
- kvs = kvs[position:]
-
- if filter != nil {
- if div := filteredToFront(kvs, filter); div != 0 {
- return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
- }
- }
- return Set{equivalent: computeDistinct(kvs)}, nil
-}
-
-// NewSetWithSortableFiltered returns a new Set.
-//
-// Duplicate keys are eliminated by taking the last value. This
-// re-orders the input slice so that unique last-values are contiguous
-// at the end of the slice.
-//
-// This ensures the following:
-//
-// - Last-value-wins semantics
-// - Caller sees the reordering, but doesn't lose values
-// - Repeated call preserve last-value wins.
-//
-// Note that methods are defined on Set, although this returns Set. Callers
-// can avoid memory allocations by:
-//
-// - allocating a Sortable for use as a temporary in this method
-// - allocating a Set for storing the return value of this constructor.
-//
-// The result maintains a cache of encoded attributes, by attribute.EncoderID.
-// This value should not be copied after its first use.
-//
-// The second []KeyValue return value is a list of attributes that were
-// excluded by the Filter (if non-nil).
-//
-// Deprecated: Use [NewSetWithFiltered] instead.
-func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) {
- return NewSetWithFiltered(kvs, filter)
-}
-
-// filteredToFront filters slice in-place using keep function. All KeyValues that need to
-// be removed are moved to the front. All KeyValues that need to be kept are
-// moved (in-order) to the back. The index for the first KeyValue to be kept is
-// returned.
-func filteredToFront(slice []KeyValue, keep Filter) int {
- n := len(slice)
- j := n
- for i := n - 1; i >= 0; i-- {
- if keep(slice[i]) {
- j--
- slice[i], slice[j] = slice[j], slice[i]
- }
- }
- return j
-}
-
-// Filter returns a filtered copy of this Set. See the documentation for
-// NewSetWithSortableFiltered for more details.
-func (l *Set) Filter(re Filter) (Set, []KeyValue) {
- if re == nil {
- return *l, nil
- }
-
- // Iterate in reverse to the first attribute that will be filtered out.
- n := l.Len()
- first := n - 1
- for ; first >= 0; first-- {
- kv, _ := l.Get(first)
- if !re(kv) {
- break
- }
- }
-
- // No attributes will be dropped, return the immutable Set l and nil.
- if first < 0 {
- return *l, nil
- }
-
- // Copy now that we know we need to return a modified set.
- //
- // Do not do this in-place on the underlying storage of *Set l. Sets are
- // immutable and filtering should not change this.
- slice := l.ToSlice()
-
- // Don't re-iterate the slice if only slice[0] is filtered.
- if first == 0 {
- // It is safe to assume len(slice) >= 1 given we found at least one
- // attribute above that needs to be filtered out.
- return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
- }
-
- // Move the filtered slice[first] to the front (preserving order).
- kv := slice[first]
- copy(slice[1:first+1], slice[:first])
- slice[0] = kv
-
- // Do not re-evaluate re(slice[first+1:]).
- div := filteredToFront(slice[1:first+1], re) + 1
- return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
-}
-
-// computeDistinct returns a Distinct using either the fixed- or
-// reflect-oriented code path, depending on the size of the input. The input
-// slice is assumed to already be sorted and de-duplicated.
-func computeDistinct(kvs []KeyValue) Distinct {
- iface := computeDistinctFixed(kvs)
- if iface == nil {
- iface = computeDistinctReflect(kvs)
- }
- return Distinct{
- iface: iface,
- }
-}
-
-// computeDistinctFixed computes a Distinct for small slices. It returns nil
-// if the input is too large for this code path.
-func computeDistinctFixed(kvs []KeyValue) interface{} {
- switch len(kvs) {
- case 1:
- return [1]KeyValue(kvs)
- case 2:
- return [2]KeyValue(kvs)
- case 3:
- return [3]KeyValue(kvs)
- case 4:
- return [4]KeyValue(kvs)
- case 5:
- return [5]KeyValue(kvs)
- case 6:
- return [6]KeyValue(kvs)
- case 7:
- return [7]KeyValue(kvs)
- case 8:
- return [8]KeyValue(kvs)
- case 9:
- return [9]KeyValue(kvs)
- case 10:
- return [10]KeyValue(kvs)
- default:
- return nil
- }
-}
-
-// computeDistinctReflect computes a Distinct using reflection, works for any
-// size input.
-func computeDistinctReflect(kvs []KeyValue) interface{} {
- at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
- for i, keyValue := range kvs {
- *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
- }
- return at.Interface()
-}
-
-// MarshalJSON returns the JSON encoding of the Set.
-func (l *Set) MarshalJSON() ([]byte, error) {
- return json.Marshal(l.equivalent.iface)
-}
-
-// MarshalLog is the marshaling function used by the logging system to represent this Set.
-func (l Set) MarshalLog() interface{} {
- kvs := make(map[string]string)
- for _, kv := range l.ToSlice() {
- kvs[string(kv.Key)] = kv.Value.Emit()
- }
- return kvs
-}
-
-// Len implements sort.Interface.
-func (l *Sortable) Len() int {
- return len(*l)
-}
-
-// Swap implements sort.Interface.
-func (l *Sortable) Swap(i, j int) {
- (*l)[i], (*l)[j] = (*l)[j], (*l)[i]
-}
-
-// Less implements sort.Interface.
-func (l *Sortable) Less(i, j int) bool {
- return (*l)[i].Key < (*l)[j].Key
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
deleted file mode 100644
index e584b2477..000000000
--- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Code generated by "stringer -type=Type"; DO NOT EDIT.
-
-package attribute
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[INVALID-0]
- _ = x[BOOL-1]
- _ = x[INT64-2]
- _ = x[FLOAT64-3]
- _ = x[STRING-4]
- _ = x[BOOLSLICE-5]
- _ = x[INT64SLICE-6]
- _ = x[FLOAT64SLICE-7]
- _ = x[STRINGSLICE-8]
-}
-
-const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"
-
-var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
-
-func (i Type) String() string {
- if i < 0 || i >= Type(len(_Type_index)-1) {
- return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Type_name[_Type_index[i]:_Type_index[i+1]]
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
deleted file mode 100644
index 9ea0ecbbd..000000000
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
- "strconv"
-
- "go.opentelemetry.io/otel/internal"
- "go.opentelemetry.io/otel/internal/attribute"
-)
-
-//go:generate stringer -type=Type
-
-// Type describes the type of the data Value holds.
-type Type int // nolint: revive // redefines builtin Type.
-
-// Value represents the value part in key-value pairs.
-type Value struct {
- vtype Type
- numeric uint64
- stringly string
- slice interface{}
-}
-
-const (
- // INVALID is used for a Value with no value set.
- INVALID Type = iota
- // BOOL is a boolean Type Value.
- BOOL
- // INT64 is a 64-bit signed integral Type Value.
- INT64
- // FLOAT64 is a 64-bit floating point Type Value.
- FLOAT64
- // STRING is a string Type Value.
- STRING
- // BOOLSLICE is a slice of booleans Type Value.
- BOOLSLICE
- // INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
- INT64SLICE
- // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
- FLOAT64SLICE
- // STRINGSLICE is a slice of strings Type Value.
- STRINGSLICE
-)
-
-// BoolValue creates a BOOL Value.
-func BoolValue(v bool) Value {
- return Value{
- vtype: BOOL,
- numeric: internal.BoolToRaw(v),
- }
-}
-
-// BoolSliceValue creates a BOOLSLICE Value.
-func BoolSliceValue(v []bool) Value {
- return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)}
-}
-
-// IntValue creates an INT64 Value.
-func IntValue(v int) Value {
- return Int64Value(int64(v))
-}
-
-// IntSliceValue creates an INTSLICE Value.
-func IntSliceValue(v []int) Value {
- var int64Val int64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
- for i, val := range v {
- cp.Elem().Index(i).SetInt(int64(val))
- }
- return Value{
- vtype: INT64SLICE,
- slice: cp.Elem().Interface(),
- }
-}
-
-// Int64Value creates an INT64 Value.
-func Int64Value(v int64) Value {
- return Value{
- vtype: INT64,
- numeric: internal.Int64ToRaw(v),
- }
-}
-
-// Int64SliceValue creates an INT64SLICE Value.
-func Int64SliceValue(v []int64) Value {
- return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)}
-}
-
-// Float64Value creates a FLOAT64 Value.
-func Float64Value(v float64) Value {
- return Value{
- vtype: FLOAT64,
- numeric: internal.Float64ToRaw(v),
- }
-}
-
-// Float64SliceValue creates a FLOAT64SLICE Value.
-func Float64SliceValue(v []float64) Value {
- return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)}
-}
-
-// StringValue creates a STRING Value.
-func StringValue(v string) Value {
- return Value{
- vtype: STRING,
- stringly: v,
- }
-}
-
-// StringSliceValue creates a STRINGSLICE Value.
-func StringSliceValue(v []string) Value {
- return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)}
-}
-
-// Type returns a type of the Value.
-func (v Value) Type() Type {
- return v.vtype
-}
-
-// AsBool returns the bool value. Make sure that the Value's type is
-// BOOL.
-func (v Value) AsBool() bool {
- return internal.RawToBool(v.numeric)
-}
-
-// AsBoolSlice returns the []bool value. Make sure that the Value's type is
-// BOOLSLICE.
-func (v Value) AsBoolSlice() []bool {
- if v.vtype != BOOLSLICE {
- return nil
- }
- return v.asBoolSlice()
-}
-
-func (v Value) asBoolSlice() []bool {
- return attribute.AsBoolSlice(v.slice)
-}
-
-// AsInt64 returns the int64 value. Make sure that the Value's type is
-// INT64.
-func (v Value) AsInt64() int64 {
- return internal.RawToInt64(v.numeric)
-}
-
-// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
-// INT64SLICE.
-func (v Value) AsInt64Slice() []int64 {
- if v.vtype != INT64SLICE {
- return nil
- }
- return v.asInt64Slice()
-}
-
-func (v Value) asInt64Slice() []int64 {
- return attribute.AsInt64Slice(v.slice)
-}
-
-// AsFloat64 returns the float64 value. Make sure that the Value's
-// type is FLOAT64.
-func (v Value) AsFloat64() float64 {
- return internal.RawToFloat64(v.numeric)
-}
-
-// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
-// FLOAT64SLICE.
-func (v Value) AsFloat64Slice() []float64 {
- if v.vtype != FLOAT64SLICE {
- return nil
- }
- return v.asFloat64Slice()
-}
-
-func (v Value) asFloat64Slice() []float64 {
- return attribute.AsFloat64Slice(v.slice)
-}
-
-// AsString returns the string value. Make sure that the Value's type
-// is STRING.
-func (v Value) AsString() string {
- return v.stringly
-}
-
-// AsStringSlice returns the []string value. Make sure that the Value's type is
-// STRINGSLICE.
-func (v Value) AsStringSlice() []string {
- if v.vtype != STRINGSLICE {
- return nil
- }
- return v.asStringSlice()
-}
-
-func (v Value) asStringSlice() []string {
- return attribute.AsStringSlice(v.slice)
-}
-
-type unknownValueType struct{}
-
-// AsInterface returns Value's data as interface{}.
-func (v Value) AsInterface() interface{} {
- switch v.Type() {
- case BOOL:
- return v.AsBool()
- case BOOLSLICE:
- return v.asBoolSlice()
- case INT64:
- return v.AsInt64()
- case INT64SLICE:
- return v.asInt64Slice()
- case FLOAT64:
- return v.AsFloat64()
- case FLOAT64SLICE:
- return v.asFloat64Slice()
- case STRING:
- return v.stringly
- case STRINGSLICE:
- return v.asStringSlice()
- }
- return unknownValueType{}
-}
-
-// Emit returns a string representation of Value's data.
-func (v Value) Emit() string {
- switch v.Type() {
- case BOOLSLICE:
- return fmt.Sprint(v.asBoolSlice())
- case BOOL:
- return strconv.FormatBool(v.AsBool())
- case INT64SLICE:
- j, err := json.Marshal(v.asInt64Slice())
- if err != nil {
- return fmt.Sprintf("invalid: %v", v.asInt64Slice())
- }
- return string(j)
- case INT64:
- return strconv.FormatInt(v.AsInt64(), 10)
- case FLOAT64SLICE:
- j, err := json.Marshal(v.asFloat64Slice())
- if err != nil {
- return fmt.Sprintf("invalid: %v", v.asFloat64Slice())
- }
- return string(j)
- case FLOAT64:
- return fmt.Sprint(v.AsFloat64())
- case STRINGSLICE:
- j, err := json.Marshal(v.asStringSlice())
- if err != nil {
- return fmt.Sprintf("invalid: %v", v.asStringSlice())
- }
- return string(j)
- case STRING:
- return v.stringly
- default:
- return "unknown"
- }
-}
-
-// MarshalJSON returns the JSON encoding of the Value.
-func (v Value) MarshalJSON() ([]byte, error) {
- var jsonVal struct {
- Type string
- Value interface{}
- }
- jsonVal.Type = v.Type().String()
- jsonVal.Value = v.AsInterface()
- return json.Marshal(jsonVal)
-}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md
deleted file mode 100644
index 7d798435e..000000000
--- a/vendor/go.opentelemetry.io/otel/baggage/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Baggage
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage)
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
deleted file mode 100644
index 0e1fe2422..000000000
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ /dev/null
@@ -1,1018 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package baggage // import "go.opentelemetry.io/otel/baggage"
-
-import (
- "errors"
- "fmt"
- "net/url"
- "strings"
- "unicode/utf8"
-
- "go.opentelemetry.io/otel/internal/baggage"
-)
-
-const (
- maxMembers = 180
- maxBytesPerMembers = 4096
- maxBytesPerBaggageString = 8192
-
- listDelimiter = ","
- keyValueDelimiter = "="
- propertyDelimiter = ";"
-)
-
-var (
- errInvalidKey = errors.New("invalid key")
- errInvalidValue = errors.New("invalid value")
- errInvalidProperty = errors.New("invalid baggage list-member property")
- errInvalidMember = errors.New("invalid baggage list-member")
- errMemberNumber = errors.New("too many list-members in baggage-string")
- errMemberBytes = errors.New("list-member too large")
- errBaggageBytes = errors.New("baggage-string too large")
-)
-
-// Property is an additional metadata entry for a baggage list-member.
-type Property struct {
- key, value string
-
- // hasValue indicates if a zero-value value means the property does not
- // have a value or if it was the zero-value.
- hasValue bool
-}
-
-// NewKeyProperty returns a new Property for key.
-//
-// The passed key must be valid, non-empty UTF-8 string.
-// If key is invalid, an error will be returned.
-// However, the specific Propagators that are used to transmit baggage entries across
-// component boundaries may impose their own restrictions on Property key.
-// For example, the W3C Baggage specification restricts the Property keys to strings that
-// satisfy the token definition from RFC7230, Section 3.2.6.
-// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key.
-func NewKeyProperty(key string) (Property, error) {
- if !validateBaggageName(key) {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
- }
-
- p := Property{key: key}
- return p, nil
-}
-
-// NewKeyValueProperty returns a new Property for key with value.
-//
-// The passed key must be compliant with W3C Baggage specification.
-// The passed value must be percent-encoded as defined in W3C Baggage specification.
-//
-// Notice: Consider using [NewKeyValuePropertyRaw] instead
-// that does not require percent-encoding of the value.
-func NewKeyValueProperty(key, value string) (Property, error) {
- if !validateKey(key) {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
- }
-
- if !validateValue(value) {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
- }
- decodedValue, err := url.PathUnescape(value)
- if err != nil {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
- }
- return NewKeyValuePropertyRaw(key, decodedValue)
-}
-
-// NewKeyValuePropertyRaw returns a new Property for key with value.
-//
-// The passed key must be valid, non-empty UTF-8 string.
-// The passed value must be valid UTF-8 string.
-// However, the specific Propagators that are used to transmit baggage entries across
-// component boundaries may impose their own restrictions on Property key.
-// For example, the W3C Baggage specification restricts the Property keys to strings that
-// satisfy the token definition from RFC7230, Section 3.2.6.
-// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key.
-func NewKeyValuePropertyRaw(key, value string) (Property, error) {
- if !validateBaggageName(key) {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
- }
- if !validateBaggageValue(value) {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
- }
-
- p := Property{
- key: key,
- value: value,
- hasValue: true,
- }
- return p, nil
-}
-
-func newInvalidProperty() Property {
- return Property{}
-}
-
-// parseProperty attempts to decode a Property from the passed string. It
-// returns an error if the input is invalid according to the W3C Baggage
-// specification.
-func parseProperty(property string) (Property, error) {
- if property == "" {
- return newInvalidProperty(), nil
- }
-
- p, ok := parsePropertyInternal(property)
- if !ok {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
- }
-
- return p, nil
-}
-
-// validate ensures p conforms to the W3C Baggage specification, returning an
-// error otherwise.
-func (p Property) validate() error {
- errFunc := func(err error) error {
- return fmt.Errorf("invalid property: %w", err)
- }
-
- if !validateBaggageName(p.key) {
- return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
- }
- if !p.hasValue && p.value != "" {
- return errFunc(errors.New("inconsistent value"))
- }
- if p.hasValue && !validateBaggageValue(p.value) {
- return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
- }
- return nil
-}
-
-// Key returns the Property key.
-func (p Property) Key() string {
- return p.key
-}
-
-// Value returns the Property value. Additionally, a boolean value is returned
-// indicating if the returned value is the empty if the Property has a value
-// that is empty or if the value is not set.
-func (p Property) Value() (string, bool) {
- return p.value, p.hasValue
-}
-
-// String encodes Property into a header string compliant with the W3C Baggage
-// specification.
-// It would return empty string if the key is invalid with the W3C Baggage
-// specification. This could happen for a UTF-8 key, as it may contain
-// invalid characters.
-func (p Property) String() string {
- // W3C Baggage specification does not allow percent-encoded keys.
- if !validateKey(p.key) {
- return ""
- }
-
- if p.hasValue {
- return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
- }
- return p.key
-}
-
-type properties []Property
-
-func fromInternalProperties(iProps []baggage.Property) properties {
- if len(iProps) == 0 {
- return nil
- }
-
- props := make(properties, len(iProps))
- for i, p := range iProps {
- props[i] = Property{
- key: p.Key,
- value: p.Value,
- hasValue: p.HasValue,
- }
- }
- return props
-}
-
-func (p properties) asInternal() []baggage.Property {
- if len(p) == 0 {
- return nil
- }
-
- iProps := make([]baggage.Property, len(p))
- for i, prop := range p {
- iProps[i] = baggage.Property{
- Key: prop.key,
- Value: prop.value,
- HasValue: prop.hasValue,
- }
- }
- return iProps
-}
-
-func (p properties) Copy() properties {
- if len(p) == 0 {
- return nil
- }
-
- props := make(properties, len(p))
- copy(props, p)
- return props
-}
-
-// validate ensures each Property in p conforms to the W3C Baggage
-// specification, returning an error otherwise.
-func (p properties) validate() error {
- for _, prop := range p {
- if err := prop.validate(); err != nil {
- return err
- }
- }
- return nil
-}
-
-// String encodes properties into a header string compliant with the W3C Baggage
-// specification.
-func (p properties) String() string {
- props := make([]string, 0, len(p))
- for _, prop := range p {
- s := prop.String()
-
- // Ignored empty properties.
- if s != "" {
- props = append(props, s)
- }
- }
- return strings.Join(props, propertyDelimiter)
-}
-
-// Member is a list-member of a baggage-string as defined by the W3C Baggage
-// specification.
-type Member struct {
- key, value string
- properties properties
-
- // hasData indicates whether the created property contains data or not.
- // Properties that do not contain data are invalid with no other check
- // required.
- hasData bool
-}
-
-// NewMember returns a new Member from the passed arguments.
-//
-// The passed key must be compliant with W3C Baggage specification.
-// The passed value must be percent-encoded as defined in W3C Baggage specification.
-//
-// Notice: Consider using [NewMemberRaw] instead
-// that does not require percent-encoding of the value.
-func NewMember(key, value string, props ...Property) (Member, error) {
- if !validateKey(key) {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
- }
-
- if !validateValue(value) {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
- }
- decodedValue, err := url.PathUnescape(value)
- if err != nil {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
- }
- return NewMemberRaw(key, decodedValue, props...)
-}
-
-// NewMemberRaw returns a new Member from the passed arguments.
-//
-// The passed key must be valid, non-empty UTF-8 string.
-// The passed value must be valid UTF-8 string.
-// However, the specific Propagators that are used to transmit baggage entries across
-// component boundaries may impose their own restrictions on baggage key.
-// For example, the W3C Baggage specification restricts the baggage keys to strings that
-// satisfy the token definition from RFC7230, Section 3.2.6.
-// For maximum compatibility, alphanumeric value are strongly recommended to be used as baggage key.
-func NewMemberRaw(key, value string, props ...Property) (Member, error) {
- m := Member{
- key: key,
- value: value,
- properties: properties(props).Copy(),
- hasData: true,
- }
- if err := m.validate(); err != nil {
- return newInvalidMember(), err
- }
- return m, nil
-}
-
-func newInvalidMember() Member {
- return Member{}
-}
-
-// parseMember attempts to decode a Member from the passed string. It returns
-// an error if the input is invalid according to the W3C Baggage
-// specification.
-func parseMember(member string) (Member, error) {
- if n := len(member); n > maxBytesPerMembers {
- return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
- }
-
- var props properties
- keyValue, properties, found := strings.Cut(member, propertyDelimiter)
- if found {
- // Parse the member properties.
- for _, pStr := range strings.Split(properties, propertyDelimiter) {
- p, err := parseProperty(pStr)
- if err != nil {
- return newInvalidMember(), err
- }
- props = append(props, p)
- }
- }
- // Parse the member key/value pair.
-
- // Take into account a value can contain equal signs (=).
- k, v, found := strings.Cut(keyValue, keyValueDelimiter)
- if !found {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
- }
- // "Leading and trailing whitespaces are allowed but MUST be trimmed
- // when converting the header into a data structure."
- key := strings.TrimSpace(k)
- if !validateKey(key) {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
- }
-
- rawVal := strings.TrimSpace(v)
- if !validateValue(rawVal) {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
- }
-
- // Decode a percent-encoded value.
- unescapeVal, err := url.PathUnescape(rawVal)
- if err != nil {
- return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
- }
-
- value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
- return Member{key: key, value: value, properties: props, hasData: true}, nil
-}
-
-// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
-func replaceInvalidUTF8Sequences(c int, unescapeVal string) string {
- if utf8.ValidString(unescapeVal) {
- return unescapeVal
- }
- // W3C baggage spec:
- // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
-
- var b strings.Builder
- b.Grow(c)
- for i := 0; i < len(unescapeVal); {
- r, size := utf8.DecodeRuneInString(unescapeVal[i:])
- if r == utf8.RuneError && size == 1 {
- // Invalid UTF-8 sequence found, replace it with '�'
- _, _ = b.WriteString("�")
- } else {
- _, _ = b.WriteRune(r)
- }
- i += size
- }
-
- return b.String()
-}
-
-// validate ensures m conforms to the W3C Baggage specification.
-// A key must be an ASCII string, returning an error otherwise.
-func (m Member) validate() error {
- if !m.hasData {
- return fmt.Errorf("%w: %q", errInvalidMember, m)
- }
-
- if !validateBaggageName(m.key) {
- return fmt.Errorf("%w: %q", errInvalidKey, m.key)
- }
- if !validateBaggageValue(m.value) {
- return fmt.Errorf("%w: %q", errInvalidValue, m.value)
- }
- return m.properties.validate()
-}
-
-// Key returns the Member key.
-func (m Member) Key() string { return m.key }
-
-// Value returns the Member value.
-func (m Member) Value() string { return m.value }
-
-// Properties returns a copy of the Member properties.
-func (m Member) Properties() []Property { return m.properties.Copy() }
-
-// String encodes Member into a header string compliant with the W3C Baggage
-// specification.
-// It would return empty string if the key is invalid with the W3C Baggage
-// specification. This could happen for a UTF-8 key, as it may contain
-// invalid characters.
-func (m Member) String() string {
- // W3C Baggage specification does not allow percent-encoded keys.
- if !validateKey(m.key) {
- return ""
- }
-
- s := m.key + keyValueDelimiter + valueEscape(m.value)
- if len(m.properties) > 0 {
- s += propertyDelimiter + m.properties.String()
- }
- return s
-}
-
-// Baggage is a list of baggage members representing the baggage-string as
-// defined by the W3C Baggage specification.
-type Baggage struct { //nolint:golint
- list baggage.List
-}
-
-// New returns a new valid Baggage. It returns an error if it results in a
-// Baggage exceeding limits set in that specification.
-//
-// It expects all the provided members to have already been validated.
-func New(members ...Member) (Baggage, error) {
- if len(members) == 0 {
- return Baggage{}, nil
- }
-
- b := make(baggage.List)
- for _, m := range members {
- if !m.hasData {
- return Baggage{}, errInvalidMember
- }
-
- // OpenTelemetry resolves duplicates by last-one-wins.
- b[m.key] = baggage.Item{
- Value: m.value,
- Properties: m.properties.asInternal(),
- }
- }
-
- // Check member numbers after deduplication.
- if len(b) > maxMembers {
- return Baggage{}, errMemberNumber
- }
-
- bag := Baggage{b}
- if n := len(bag.String()); n > maxBytesPerBaggageString {
- return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
- }
-
- return bag, nil
-}
-
-// Parse attempts to decode a baggage-string from the passed string. It
-// returns an error if the input is invalid according to the W3C Baggage
-// specification.
-//
-// If there are duplicate list-members contained in baggage, the last one
-// defined (reading left-to-right) will be the only one kept. This diverges
-// from the W3C Baggage specification which allows duplicate list-members, but
-// conforms to the OpenTelemetry Baggage specification.
-func Parse(bStr string) (Baggage, error) {
- if bStr == "" {
- return Baggage{}, nil
- }
-
- if n := len(bStr); n > maxBytesPerBaggageString {
- return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
- }
-
- b := make(baggage.List)
- for _, memberStr := range strings.Split(bStr, listDelimiter) {
- m, err := parseMember(memberStr)
- if err != nil {
- return Baggage{}, err
- }
- // OpenTelemetry resolves duplicates by last-one-wins.
- b[m.key] = baggage.Item{
- Value: m.value,
- Properties: m.properties.asInternal(),
- }
- }
-
- // OpenTelemetry does not allow for duplicate list-members, but the W3C
- // specification does. Now that we have deduplicated, ensure the baggage
- // does not exceed list-member limits.
- if len(b) > maxMembers {
- return Baggage{}, errMemberNumber
- }
-
- return Baggage{b}, nil
-}
-
-// Member returns the baggage list-member identified by key.
-//
-// If there is no list-member matching the passed key the returned Member will
-// be a zero-value Member.
-// The returned member is not validated, as we assume the validation happened
-// when it was added to the Baggage.
-func (b Baggage) Member(key string) Member {
- v, ok := b.list[key]
- if !ok {
- // We do not need to worry about distinguishing between the situation
- // where a zero-valued Member is included in the Baggage because a
- // zero-valued Member is invalid according to the W3C Baggage
- // specification (it has an empty key).
- return newInvalidMember()
- }
-
- return Member{
- key: key,
- value: v.Value,
- properties: fromInternalProperties(v.Properties),
- hasData: true,
- }
-}
-
-// Members returns all the baggage list-members.
-// The order of the returned list-members is not significant.
-//
-// The returned members are not validated, as we assume the validation happened
-// when they were added to the Baggage.
-func (b Baggage) Members() []Member {
- if len(b.list) == 0 {
- return nil
- }
-
- members := make([]Member, 0, len(b.list))
- for k, v := range b.list {
- members = append(members, Member{
- key: k,
- value: v.Value,
- properties: fromInternalProperties(v.Properties),
- hasData: true,
- })
- }
- return members
-}
-
-// SetMember returns a copy of the Baggage with the member included. If the
-// baggage contains a Member with the same key, the existing Member is
-// replaced.
-//
-// If member is invalid according to the W3C Baggage specification, an error
-// is returned with the original Baggage.
-func (b Baggage) SetMember(member Member) (Baggage, error) {
- if !member.hasData {
- return b, errInvalidMember
- }
-
- n := len(b.list)
- if _, ok := b.list[member.key]; !ok {
- n++
- }
- list := make(baggage.List, n)
-
- for k, v := range b.list {
- // Do not copy if we are just going to overwrite.
- if k == member.key {
- continue
- }
- list[k] = v
- }
-
- list[member.key] = baggage.Item{
- Value: member.value,
- Properties: member.properties.asInternal(),
- }
-
- return Baggage{list: list}, nil
-}
-
-// DeleteMember returns a copy of the Baggage with the list-member identified
-// by key removed.
-func (b Baggage) DeleteMember(key string) Baggage {
- n := len(b.list)
- if _, ok := b.list[key]; ok {
- n--
- }
- list := make(baggage.List, n)
-
- for k, v := range b.list {
- if k == key {
- continue
- }
- list[k] = v
- }
-
- return Baggage{list: list}
-}
-
-// Len returns the number of list-members in the Baggage.
-func (b Baggage) Len() int {
- return len(b.list)
-}
-
-// String encodes Baggage into a header string compliant with the W3C Baggage
-// specification.
-// It would ignore members where the member key is invalid with the W3C Baggage
-// specification. This could happen for a UTF-8 key, as it may contain
-// invalid characters.
-func (b Baggage) String() string {
- members := make([]string, 0, len(b.list))
- for k, v := range b.list {
- s := Member{
- key: k,
- value: v.Value,
- properties: fromInternalProperties(v.Properties),
- }.String()
-
- // Ignored empty members.
- if s != "" {
- members = append(members, s)
- }
- }
- return strings.Join(members, listDelimiter)
-}
-
-// parsePropertyInternal attempts to decode a Property from the passed string.
-// It follows the spec at https://www.w3.org/TR/baggage/#definition.
-func parsePropertyInternal(s string) (p Property, ok bool) {
- // For the entire function we will use " key = value " as an example.
- // Attempting to parse the key.
- // First skip spaces at the beginning "< >key = value " (they could be empty).
- index := skipSpace(s, 0)
-
- // Parse the key: " <key> = value ".
- keyStart := index
- keyEnd := index
- for _, c := range s[keyStart:] {
- if !validateKeyChar(c) {
- break
- }
- keyEnd++
- }
-
- // If we couldn't find any valid key character,
- // it means the key is either empty or invalid.
- if keyStart == keyEnd {
- return
- }
-
- // Skip spaces after the key: " key< >= value ".
- index = skipSpace(s, keyEnd)
-
- if index == len(s) {
- // A key can have no value, like: " key ".
- ok = true
- p.key = s[keyStart:keyEnd]
- return
- }
-
- // If we have not reached the end and we can't find the '=' delimiter,
- // it means the property is invalid.
- if s[index] != keyValueDelimiter[0] {
- return
- }
-
- // Attempting to parse the value.
- // Match: " key =< >value ".
- index = skipSpace(s, index+1)
-
- // Match the value string: " key = <value> ".
- // A valid property can be: " key =".
- // Therefore, we don't have to check if the value is empty.
- valueStart := index
- valueEnd := index
- for _, c := range s[valueStart:] {
- if !validateValueChar(c) {
- break
- }
- valueEnd++
- }
-
- // Skip all trailing whitespaces: " key = value< >".
- index = skipSpace(s, valueEnd)
-
- // If after looking for the value and skipping whitespaces
- // we have not reached the end, it means the property is
- // invalid, something like: " key = value value1".
- if index != len(s) {
- return
- }
-
- // Decode a percent-encoded value.
- rawVal := s[valueStart:valueEnd]
- unescapeVal, err := url.PathUnescape(rawVal)
- if err != nil {
- return
- }
- value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
-
- ok = true
- p.key = s[keyStart:keyEnd]
- p.hasValue = true
-
- p.value = value
- return
-}
-
-func skipSpace(s string, offset int) int {
- i := offset
- for ; i < len(s); i++ {
- c := s[i]
- if c != ' ' && c != '\t' {
- break
- }
- }
- return i
-}
-
-var safeKeyCharset = [utf8.RuneSelf]bool{
- // 0x23 to 0x27
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
-
- // 0x30 to 0x39
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
-
- // 0x41 to 0x5a
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
- 'V': true,
- 'W': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
-
- // 0x5e to 0x7a
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
-
- // remainder
- '!': true,
- '*': true,
- '+': true,
- '-': true,
- '.': true,
- '|': true,
- '~': true,
-}
-
-// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
-// Baggage name is a valid, non-empty UTF-8 string.
-func validateBaggageName(s string) bool {
- if len(s) == 0 {
- return false
- }
-
- return utf8.ValidString(s)
-}
-
-// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value.
-// Baggage value is a valid UTF-8 strings.
-// Empty string is also a valid UTF-8 string.
-func validateBaggageValue(s string) bool {
- return utf8.ValidString(s)
-}
-
-// validateKey checks if the string is a valid W3C Baggage key.
-func validateKey(s string) bool {
- if len(s) == 0 {
- return false
- }
-
- for _, c := range s {
- if !validateKeyChar(c) {
- return false
- }
- }
-
- return true
-}
-
-func validateKeyChar(c int32) bool {
- return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
-}
-
-// validateValue checks if the string is a valid W3C Baggage value.
-func validateValue(s string) bool {
- for _, c := range s {
- if !validateValueChar(c) {
- return false
- }
- }
-
- return true
-}
-
-var safeValueCharset = [utf8.RuneSelf]bool{
- '!': true, // 0x21
-
- // 0x23 to 0x2b
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
- '(': true,
- ')': true,
- '*': true,
- '+': true,
-
- // 0x2d to 0x3a
- '-': true,
- '.': true,
- '/': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- ':': true,
-
- // 0x3c to 0x5b
- '<': true, // 0x3C
- '=': true, // 0x3D
- '>': true, // 0x3E
- '?': true, // 0x3F
- '@': true, // 0x40
- 'A': true, // 0x41
- 'B': true, // 0x42
- 'C': true, // 0x43
- 'D': true, // 0x44
- 'E': true, // 0x45
- 'F': true, // 0x46
- 'G': true, // 0x47
- 'H': true, // 0x48
- 'I': true, // 0x49
- 'J': true, // 0x4A
- 'K': true, // 0x4B
- 'L': true, // 0x4C
- 'M': true, // 0x4D
- 'N': true, // 0x4E
- 'O': true, // 0x4F
- 'P': true, // 0x50
- 'Q': true, // 0x51
- 'R': true, // 0x52
- 'S': true, // 0x53
- 'T': true, // 0x54
- 'U': true, // 0x55
- 'V': true, // 0x56
- 'W': true, // 0x57
- 'X': true, // 0x58
- 'Y': true, // 0x59
- 'Z': true, // 0x5A
- '[': true, // 0x5B
-
- // 0x5d to 0x7e
- ']': true, // 0x5D
- '^': true, // 0x5E
- '_': true, // 0x5F
- '`': true, // 0x60
- 'a': true, // 0x61
- 'b': true, // 0x62
- 'c': true, // 0x63
- 'd': true, // 0x64
- 'e': true, // 0x65
- 'f': true, // 0x66
- 'g': true, // 0x67
- 'h': true, // 0x68
- 'i': true, // 0x69
- 'j': true, // 0x6A
- 'k': true, // 0x6B
- 'l': true, // 0x6C
- 'm': true, // 0x6D
- 'n': true, // 0x6E
- 'o': true, // 0x6F
- 'p': true, // 0x70
- 'q': true, // 0x71
- 'r': true, // 0x72
- 's': true, // 0x73
- 't': true, // 0x74
- 'u': true, // 0x75
- 'v': true, // 0x76
- 'w': true, // 0x77
- 'x': true, // 0x78
- 'y': true, // 0x79
- 'z': true, // 0x7A
- '{': true, // 0x7B
- '|': true, // 0x7C
- '}': true, // 0x7D
- '~': true, // 0x7E
-}
-
-func validateValueChar(c int32) bool {
- return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c]
-}
-
-// valueEscape escapes the string so it can be safely placed inside a baggage value,
-// replacing special characters with %XX sequences as needed.
-//
-// The implementation is based on:
-// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285.
-func valueEscape(s string) string {
- hexCount := 0
- for i := 0; i < len(s); i++ {
- c := s[i]
- if shouldEscape(c) {
- hexCount++
- }
- }
-
- if hexCount == 0 {
- return s
- }
-
- var buf [64]byte
- var t []byte
-
- required := len(s) + 2*hexCount
- if required <= len(buf) {
- t = buf[:required]
- } else {
- t = make([]byte, required)
- }
-
- j := 0
- for i := 0; i < len(s); i++ {
- c := s[i]
- if shouldEscape(s[i]) {
- const upperhex = "0123456789ABCDEF"
- t[j] = '%'
- t[j+1] = upperhex[c>>4]
- t[j+2] = upperhex[c&15]
- j += 3
- } else {
- t[j] = c
- j++
- }
- }
-
- return string(t)
-}
-
-// shouldEscape returns true if the specified byte should be escaped when
-// appearing in a baggage value string.
-func shouldEscape(c byte) bool {
- if c == '%' {
- // The percent character must be encoded so that percent-encoding can work.
- return true
- }
- return !validateValueChar(int32(c))
-}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go
deleted file mode 100644
index a572461a0..000000000
--- a/vendor/go.opentelemetry.io/otel/baggage/context.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package baggage // import "go.opentelemetry.io/otel/baggage"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/internal/baggage"
-)
-
-// ContextWithBaggage returns a copy of parent with baggage.
-func ContextWithBaggage(parent context.Context, b Baggage) context.Context {
- // Delegate so any hooks for the OpenTracing bridge are handled.
- return baggage.ContextWithList(parent, b.list)
-}
-
-// ContextWithoutBaggage returns a copy of parent with no baggage.
-func ContextWithoutBaggage(parent context.Context) context.Context {
- // Delegate so any hooks for the OpenTracing bridge are handled.
- return baggage.ContextWithList(parent, nil)
-}
-
-// FromContext returns the baggage contained in ctx.
-func FromContext(ctx context.Context) Baggage {
- // Delegate so any hooks for the OpenTracing bridge are handled.
- return Baggage{list: baggage.ListFromContext(ctx)}
-}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go
deleted file mode 100644
index b51d87cab..000000000
--- a/vendor/go.opentelemetry.io/otel/baggage/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package baggage provides functionality for storing and retrieving
-baggage items in Go context. For propagating the baggage, see the
-go.opentelemetry.io/otel/propagation package.
-*/
-package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md
deleted file mode 100644
index 24c52b387..000000000
--- a/vendor/go.opentelemetry.io/otel/codes/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Codes
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes)
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
deleted file mode 100644
index 49a35b122..000000000
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package codes // import "go.opentelemetry.io/otel/codes"
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "strconv"
-)
-
-const (
- // Unset is the default status code.
- Unset Code = 0
-
- // Error indicates the operation contains an error.
- //
- // NOTE: The error code in OTLP is 2.
- // The value of this enum is only relevant to the internals
- // of the Go SDK.
- Error Code = 1
-
- // Ok indicates operation has been validated by an Application developers
- // or Operator to have completed successfully, or contain no error.
- //
- // NOTE: The Ok code in OTLP is 1.
- // The value of this enum is only relevant to the internals
- // of the Go SDK.
- Ok Code = 2
-
- maxCode = 3
-)
-
-// Code is an 32-bit representation of a status state.
-type Code uint32
-
-var codeToStr = map[Code]string{
- Unset: "Unset",
- Error: "Error",
- Ok: "Ok",
-}
-
-var strToCode = map[string]Code{
- `"Unset"`: Unset,
- `"Error"`: Error,
- `"Ok"`: Ok,
-}
-
-// String returns the Code as a string.
-func (c Code) String() string {
- return codeToStr[c]
-}
-
-// UnmarshalJSON unmarshals b into the Code.
-//
-// This is based on the functionality in the gRPC codes package:
-// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244
-func (c *Code) UnmarshalJSON(b []byte) error {
- // From json.Unmarshaler: By convention, to approximate the behavior of
- // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
- // a no-op.
- if string(b) == "null" {
- return nil
- }
- if c == nil {
- return errors.New("nil receiver passed to UnmarshalJSON")
- }
-
- var x interface{}
- if err := json.Unmarshal(b, &x); err != nil {
- return err
- }
- switch x.(type) {
- case string:
- if jc, ok := strToCode[string(b)]; ok {
- *c = jc
- return nil
- }
- return fmt.Errorf("invalid code: %q", string(b))
- case float64:
- if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
- if ci >= maxCode {
- return fmt.Errorf("invalid code: %q", ci)
- }
-
- *c = Code(ci) // nolint: gosec // Bit size of 32 check above.
- return nil
- }
- return fmt.Errorf("invalid code: %q", string(b))
- default:
- return fmt.Errorf("invalid code: %q", string(b))
- }
-}
-
-// MarshalJSON returns c as the JSON encoding of c.
-func (c *Code) MarshalJSON() ([]byte, error) {
- if c == nil {
- return []byte("null"), nil
- }
- str, ok := codeToStr[*c]
- if !ok {
- return nil, fmt.Errorf("invalid code: %d", *c)
- }
- return []byte(fmt.Sprintf("%q", str)), nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go
deleted file mode 100644
index ee8db448b..000000000
--- a/vendor/go.opentelemetry.io/otel/codes/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package codes defines the canonical error codes used by OpenTelemetry.
-
-It conforms to [the OpenTelemetry
-specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status).
-*/
-package codes // import "go.opentelemetry.io/otel/codes"
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
deleted file mode 100644
index 921f85961..000000000
--- a/vendor/go.opentelemetry.io/otel/doc.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package otel provides global access to the OpenTelemetry API. The subpackages of
-the otel package provide an implementation of the OpenTelemetry API.
-
-The provided API is used to instrument code and measure data about that code's
-performance and operation. The measured data, by default, is not processed or
-transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
-default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
-exporters are used to process and transport this data.
-
-To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/.
-
-To read more about tracing, see go.opentelemetry.io/otel/trace.
-
-To read more about metrics, see go.opentelemetry.io/otel/metric.
-
-To read more about logs, see go.opentelemetry.io/otel/log.
-
-To read more about propagation, see go.opentelemetry.io/otel/propagation and
-go.opentelemetry.io/otel/baggage.
-*/
-package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go
deleted file mode 100644
index 67414c71e..000000000
--- a/vendor/go.opentelemetry.io/otel/error_handler.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otel // import "go.opentelemetry.io/otel"
-
-// ErrorHandler handles irremediable events.
-type ErrorHandler interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Handle handles any error deemed irremediable by an OpenTelemetry
- // component.
- Handle(error)
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
-
-// ErrorHandlerFunc is a convenience adapter to allow the use of a function
-// as an ErrorHandler.
-type ErrorHandlerFunc func(error)
-
-var _ ErrorHandler = ErrorHandlerFunc(nil)
-
-// Handle handles the irremediable error by calling the ErrorHandlerFunc itself.
-func (f ErrorHandlerFunc) Handle(err error) {
- f(err)
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
deleted file mode 100644
index 50802d5ae..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# OTLP Trace Exporter
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
deleted file mode 100644
index 3c1a625c0..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
-
-import (
- "context"
-
- tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
-)
-
-// Client manages connections to the collector, handles the
-// transformation of data into wire format, and the transmission of that
-// data to the collector.
-type Client interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Start should establish connection(s) to endpoint(s). It is
- // called just once by the exporter, so the implementation
- // does not need to worry about idempotence and locking.
- Start(ctx context.Context) error
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Stop should close the connections. The function is called
- // only once by the exporter, so the implementation does not
- // need to worry about idempotence, but it may be called
- // concurrently with UploadTraces, so proper
- // locking is required. The function serves as a
- // synchronization point - after the function returns, the
- // process of closing connections is assumed to be finished.
- Stop(ctx context.Context) error
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // UploadTraces should transform the passed traces to the wire
- // format and send it to the collector. May be called
- // concurrently.
- UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
deleted file mode 100644
index 09ad5eadb..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package otlptrace contains abstractions for OTLP span exporters.
-See the official OTLP span exporter implementations:
- - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc],
- - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp].
-*/
-package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
deleted file mode 100644
index 3f0a518ae..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
-
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
- tracesdk "go.opentelemetry.io/otel/sdk/trace"
-)
-
-var errAlreadyStarted = errors.New("already started")
-
-// Exporter exports trace data in the OTLP wire format.
-type Exporter struct {
- client Client
-
- mu sync.RWMutex
- started bool
-
- startOnce sync.Once
- stopOnce sync.Once
-}
-
-// ExportSpans exports a batch of spans.
-func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error {
- protoSpans := tracetransform.Spans(ss)
- if len(protoSpans) == 0 {
- return nil
- }
-
- err := e.client.UploadTraces(ctx, protoSpans)
- if err != nil {
- return fmt.Errorf("traces export: %w", err)
- }
- return nil
-}
-
-// Start establishes a connection to the receiving endpoint.
-func (e *Exporter) Start(ctx context.Context) error {
- err := errAlreadyStarted
- e.startOnce.Do(func() {
- e.mu.Lock()
- e.started = true
- e.mu.Unlock()
- err = e.client.Start(ctx)
- })
-
- return err
-}
-
-// Shutdown flushes all exports and closes all connections to the receiving endpoint.
-func (e *Exporter) Shutdown(ctx context.Context) error {
- e.mu.RLock()
- started := e.started
- e.mu.RUnlock()
-
- if !started {
- return nil
- }
-
- var err error
-
- e.stopOnce.Do(func() {
- err = e.client.Stop(ctx)
- e.mu.Lock()
- e.started = false
- e.mu.Unlock()
- })
-
- return err
-}
-
-var _ tracesdk.SpanExporter = (*Exporter)(nil)
-
-// New constructs a new Exporter and starts it.
-func New(ctx context.Context, client Client) (*Exporter, error) {
- exp := NewUnstarted(client)
- if err := exp.Start(ctx); err != nil {
- return nil, err
- }
- return exp, nil
-}
-
-// NewUnstarted constructs a new Exporter and does not start it.
-func NewUnstarted(client Client) *Exporter {
- return &Exporter{
- client: client,
- }
-}
-
-// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
-func (e *Exporter) MarshalLog() interface{} {
- return struct {
- Type string
- Client Client
- }{
- Type: "otlptrace",
- Client: e.client,
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
deleted file mode 100644
index 4571a5ca3..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
-
-import (
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/resource"
- commonpb "go.opentelemetry.io/proto/otlp/common/v1"
-)
-
-// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
-func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
- if len(attrs) == 0 {
- return nil
- }
-
- out := make([]*commonpb.KeyValue, 0, len(attrs))
- for _, kv := range attrs {
- out = append(out, KeyValue(kv))
- }
- return out
-}
-
-// Iterator transforms an attribute iterator into OTLP key-values.
-func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
- l := iter.Len()
- if l == 0 {
- return nil
- }
-
- out := make([]*commonpb.KeyValue, 0, l)
- for iter.Next() {
- out = append(out, KeyValue(iter.Attribute()))
- }
- return out
-}
-
-// ResourceAttributes transforms a Resource OTLP key-values.
-func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
- return Iterator(res.Iter())
-}
-
-// KeyValue transforms an attribute KeyValue into an OTLP key-value.
-func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
- return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
-}
-
-// Value transforms an attribute Value into an OTLP AnyValue.
-func Value(v attribute.Value) *commonpb.AnyValue {
- av := new(commonpb.AnyValue)
- switch v.Type() {
- case attribute.BOOL:
- av.Value = &commonpb.AnyValue_BoolValue{
- BoolValue: v.AsBool(),
- }
- case attribute.BOOLSLICE:
- av.Value = &commonpb.AnyValue_ArrayValue{
- ArrayValue: &commonpb.ArrayValue{
- Values: boolSliceValues(v.AsBoolSlice()),
- },
- }
- case attribute.INT64:
- av.Value = &commonpb.AnyValue_IntValue{
- IntValue: v.AsInt64(),
- }
- case attribute.INT64SLICE:
- av.Value = &commonpb.AnyValue_ArrayValue{
- ArrayValue: &commonpb.ArrayValue{
- Values: int64SliceValues(v.AsInt64Slice()),
- },
- }
- case attribute.FLOAT64:
- av.Value = &commonpb.AnyValue_DoubleValue{
- DoubleValue: v.AsFloat64(),
- }
- case attribute.FLOAT64SLICE:
- av.Value = &commonpb.AnyValue_ArrayValue{
- ArrayValue: &commonpb.ArrayValue{
- Values: float64SliceValues(v.AsFloat64Slice()),
- },
- }
- case attribute.STRING:
- av.Value = &commonpb.AnyValue_StringValue{
- StringValue: v.AsString(),
- }
- case attribute.STRINGSLICE:
- av.Value = &commonpb.AnyValue_ArrayValue{
- ArrayValue: &commonpb.ArrayValue{
- Values: stringSliceValues(v.AsStringSlice()),
- },
- }
- default:
- av.Value = &commonpb.AnyValue_StringValue{
- StringValue: "INVALID",
- }
- }
- return av
-}
-
-func boolSliceValues(vals []bool) []*commonpb.AnyValue {
- converted := make([]*commonpb.AnyValue, len(vals))
- for i, v := range vals {
- converted[i] = &commonpb.AnyValue{
- Value: &commonpb.AnyValue_BoolValue{
- BoolValue: v,
- },
- }
- }
- return converted
-}
-
-func int64SliceValues(vals []int64) []*commonpb.AnyValue {
- converted := make([]*commonpb.AnyValue, len(vals))
- for i, v := range vals {
- converted[i] = &commonpb.AnyValue{
- Value: &commonpb.AnyValue_IntValue{
- IntValue: v,
- },
- }
- }
- return converted
-}
-
-func float64SliceValues(vals []float64) []*commonpb.AnyValue {
- converted := make([]*commonpb.AnyValue, len(vals))
- for i, v := range vals {
- converted[i] = &commonpb.AnyValue{
- Value: &commonpb.AnyValue_DoubleValue{
- DoubleValue: v,
- },
- }
- }
- return converted
-}
-
-func stringSliceValues(vals []string) []*commonpb.AnyValue {
- converted := make([]*commonpb.AnyValue, len(vals))
- for i, v := range vals {
- converted[i] = &commonpb.AnyValue{
- Value: &commonpb.AnyValue_StringValue{
- StringValue: v,
- },
- }
- }
- return converted
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
deleted file mode 100644
index 2e7690e43..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
-
-import (
- "go.opentelemetry.io/otel/sdk/instrumentation"
- commonpb "go.opentelemetry.io/proto/otlp/common/v1"
-)
-
-func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope {
- if il == (instrumentation.Scope{}) {
- return nil
- }
- return &commonpb.InstrumentationScope{
- Name: il.Name,
- Version: il.Version,
- Attributes: Iterator(il.Attributes.Iter()),
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
deleted file mode 100644
index db7b698a5..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
-
-import (
- "go.opentelemetry.io/otel/sdk/resource"
- resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
-)
-
-// Resource transforms a Resource into an OTLP Resource.
-func Resource(r *resource.Resource) *resourcepb.Resource {
- if r == nil {
- return nil
- }
- return &resourcepb.Resource{Attributes: ResourceAttributes(r)}
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
deleted file mode 100644
index bf27ef022..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
-
-import (
- "math"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/sdk/instrumentation"
- tracesdk "go.opentelemetry.io/otel/sdk/trace"
- "go.opentelemetry.io/otel/trace"
- tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
-)
-
-// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
-// ResourceSpans.
-func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
- if len(sdl) == 0 {
- return nil
- }
-
- rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans)
-
- type key struct {
- r attribute.Distinct
- is instrumentation.Scope
- }
- ssm := make(map[key]*tracepb.ScopeSpans)
-
- var resources int
- for _, sd := range sdl {
- if sd == nil {
- continue
- }
-
- rKey := sd.Resource().Equivalent()
- k := key{
- r: rKey,
- is: sd.InstrumentationScope(),
- }
- scopeSpan, iOk := ssm[k]
- if !iOk {
- // Either the resource or instrumentation scope were unknown.
- scopeSpan = &tracepb.ScopeSpans{
- Scope: InstrumentationScope(sd.InstrumentationScope()),
- Spans: []*tracepb.Span{},
- SchemaUrl: sd.InstrumentationScope().SchemaURL,
- }
- }
- scopeSpan.Spans = append(scopeSpan.Spans, span(sd))
- ssm[k] = scopeSpan
-
- rs, rOk := rsm[rKey]
- if !rOk {
- resources++
- // The resource was unknown.
- rs = &tracepb.ResourceSpans{
- Resource: Resource(sd.Resource()),
- ScopeSpans: []*tracepb.ScopeSpans{scopeSpan},
- SchemaUrl: sd.Resource().SchemaURL(),
- }
- rsm[rKey] = rs
- continue
- }
-
- // The resource has been seen before. Check if the instrumentation
- // library lookup was unknown because if so we need to add it to the
- // ResourceSpans. Otherwise, the instrumentation library has already
- // been seen and the append we did above will be included it in the
- // ScopeSpans reference.
- if !iOk {
- rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan)
- }
- }
-
- // Transform the categorized map into a slice
- rss := make([]*tracepb.ResourceSpans, 0, resources)
- for _, rs := range rsm {
- rss = append(rss, rs)
- }
- return rss
-}
-
-// span transforms a Span into an OTLP span.
-func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
- if sd == nil {
- return nil
- }
-
- tid := sd.SpanContext().TraceID()
- sid := sd.SpanContext().SpanID()
-
- s := &tracepb.Span{
- TraceId: tid[:],
- SpanId: sid[:],
- TraceState: sd.SpanContext().TraceState().String(),
- Status: status(sd.Status().Code, sd.Status().Description),
- StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked.
- EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked.
- Links: links(sd.Links()),
- Kind: spanKind(sd.SpanKind()),
- Name: sd.Name(),
- Attributes: KeyValues(sd.Attributes()),
- Events: spanEvents(sd.Events()),
- DroppedAttributesCount: clampUint32(sd.DroppedAttributes()),
- DroppedEventsCount: clampUint32(sd.DroppedEvents()),
- DroppedLinksCount: clampUint32(sd.DroppedLinks()),
- }
-
- if psid := sd.Parent().SpanID(); psid.IsValid() {
- s.ParentSpanId = psid[:]
- }
- s.Flags = buildSpanFlags(sd.Parent())
-
- return s
-}
-
-func clampUint32(v int) uint32 {
- if v < 0 {
- return 0
- }
- if int64(v) > math.MaxUint32 {
- return math.MaxUint32
- }
- return uint32(v) // nolint: gosec // Overflow/Underflow checked.
-}
-
-// status transform a span code and message into an OTLP span status.
-func status(status codes.Code, message string) *tracepb.Status {
- var c tracepb.Status_StatusCode
- switch status {
- case codes.Ok:
- c = tracepb.Status_STATUS_CODE_OK
- case codes.Error:
- c = tracepb.Status_STATUS_CODE_ERROR
- default:
- c = tracepb.Status_STATUS_CODE_UNSET
- }
- return &tracepb.Status{
- Code: c,
- Message: message,
- }
-}
-
-// links transforms span Links to OTLP span links.
-func links(links []tracesdk.Link) []*tracepb.Span_Link {
- if len(links) == 0 {
- return nil
- }
-
- sl := make([]*tracepb.Span_Link, 0, len(links))
- for _, otLink := range links {
- // This redefinition is necessary to prevent otLink.*ID[:] copies
- // being reused -- in short we need a new otLink per iteration.
- otLink := otLink
-
- tid := otLink.SpanContext.TraceID()
- sid := otLink.SpanContext.SpanID()
-
- flags := buildSpanFlags(otLink.SpanContext)
-
- sl = append(sl, &tracepb.Span_Link{
- TraceId: tid[:],
- SpanId: sid[:],
- Attributes: KeyValues(otLink.Attributes),
- DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount),
- Flags: flags,
- })
- }
- return sl
-}
-
-func buildSpanFlags(sc trace.SpanContext) uint32 {
- flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK
- if sc.IsRemote() {
- flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
- }
-
- return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative
-}
-
-// spanEvents transforms span Events to an OTLP span events.
-func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
- if len(es) == 0 {
- return nil
- }
-
- events := make([]*tracepb.Span_Event, len(es))
- // Transform message events
- for i := 0; i < len(es); i++ {
- events[i] = &tracepb.Span_Event{
- Name: es[i].Name,
- TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked.
- Attributes: KeyValues(es[i].Attributes),
- DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount),
- }
- }
- return events
-}
-
-// spanKind transforms a SpanKind to an OTLP span kind.
-func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind {
- switch kind {
- case trace.SpanKindInternal:
- return tracepb.Span_SPAN_KIND_INTERNAL
- case trace.SpanKindClient:
- return tracepb.Span_SPAN_KIND_CLIENT
- case trace.SpanKindServer:
- return tracepb.Span_SPAN_KIND_SERVER
- case trace.SpanKindProducer:
- return tracepb.Span_SPAN_KIND_PRODUCER
- case trace.SpanKindConsumer:
- return tracepb.Span_SPAN_KIND_CONSUMER
- default:
- return tracepb.Span_SPAN_KIND_UNSPECIFIED
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
deleted file mode 100644
index 5309bb7cb..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# OTLP Trace gRPC Exporter
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
deleted file mode 100644
index 8409b5f8f..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
-
-import (
- "context"
- "errors"
- "sync"
- "time"
-
- "google.golang.org/genproto/googleapis/rpc/errdetails"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
- coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
- tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
-)
-
-type client struct {
- endpoint string
- dialOpts []grpc.DialOption
- metadata metadata.MD
- exportTimeout time.Duration
- requestFunc retry.RequestFunc
-
- // stopCtx is used as a parent context for all exports. Therefore, when it
- // is canceled with the stopFunc all exports are canceled.
- stopCtx context.Context
- // stopFunc cancels stopCtx, stopping any active exports.
- stopFunc context.CancelFunc
-
- // ourConn keeps track of where conn was created: true if created here on
- // Start, or false if passed with an option. This is important on Shutdown
- // as the conn should only be closed if created here on start. Otherwise,
- // it is up to the processes that passed the conn to close it.
- ourConn bool
- conn *grpc.ClientConn
- tscMu sync.RWMutex
- tsc coltracepb.TraceServiceClient
-}
-
-// Compile time check *client implements otlptrace.Client.
-var _ otlptrace.Client = (*client)(nil)
-
-// NewClient creates a new gRPC trace client.
-func NewClient(opts ...Option) otlptrace.Client {
- return newClient(opts...)
-}
-
-func newClient(opts ...Option) *client {
- cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
-
- ctx, cancel := context.WithCancel(context.Background())
-
- c := &client{
- endpoint: cfg.Traces.Endpoint,
- exportTimeout: cfg.Traces.Timeout,
- requestFunc: cfg.RetryConfig.RequestFunc(retryable),
- dialOpts: cfg.DialOptions,
- stopCtx: ctx,
- stopFunc: cancel,
- conn: cfg.GRPCConn,
- }
-
- if len(cfg.Traces.Headers) > 0 {
- c.metadata = metadata.New(cfg.Traces.Headers)
- }
-
- return c
-}
-
-// Start establishes a gRPC connection to the collector.
-func (c *client) Start(context.Context) error {
- if c.conn == nil {
- // If the caller did not provide a ClientConn when the client was
- // created, create one using the configuration they did provide.
- conn, err := grpc.NewClient(c.endpoint, c.dialOpts...)
- if err != nil {
- return err
- }
- // Keep track that we own the lifecycle of this conn and need to close
- // it on Shutdown.
- c.ourConn = true
- c.conn = conn
- }
-
- // The otlptrace.Client interface states this method is called just once,
- // so no need to check if already started.
- c.tscMu.Lock()
- c.tsc = coltracepb.NewTraceServiceClient(c.conn)
- c.tscMu.Unlock()
-
- return nil
-}
-
-var errAlreadyStopped = errors.New("the client is already stopped")
-
-// Stop shuts down the client.
-//
-// Any active connections to a remote endpoint are closed if they were created
-// by the client. Any gRPC connection passed during creation using
-// WithGRPCConn will not be closed. It is the caller's responsibility to
-// handle cleanup of that resource.
-//
-// This method synchronizes with the UploadTraces method of the client. It
-// will wait for any active calls to that method to complete unimpeded, or it
-// will cancel any active calls if ctx expires. If ctx expires, the context
-// error will be forwarded as the returned error. All client held resources
-// will still be released in this situation.
-//
-// If the client has already stopped, an error will be returned describing
-// this.
-func (c *client) Stop(ctx context.Context) error {
- // Make sure to return context error if the context is done when calling this method.
- err := ctx.Err()
-
- // Acquire the c.tscMu lock within the ctx lifetime.
- acquired := make(chan struct{})
- go func() {
- c.tscMu.Lock()
- close(acquired)
- }()
-
- select {
- case <-ctx.Done():
- // The Stop timeout is reached. Kill any remaining exports to force
- // the clear of the lock and save the timeout error to return and
- // signal the shutdown timed out before cleanly stopping.
- c.stopFunc()
- err = ctx.Err()
-
- // To ensure the client is not left in a dirty state c.tsc needs to be
- // set to nil. To avoid the race condition when doing this, ensure
- // that all the exports are killed (initiated by c.stopFunc).
- <-acquired
- case <-acquired:
- }
- // Hold the tscMu lock for the rest of the function to ensure no new
- // exports are started.
- defer c.tscMu.Unlock()
-
- // The otlptrace.Client interface states this method is called only
- // once, but there is no guarantee it is called after Start. Ensure the
- // client is started before doing anything and let the called know if they
- // made a mistake.
- if c.tsc == nil {
- return errAlreadyStopped
- }
-
- // Clear c.tsc to signal the client is stopped.
- c.tsc = nil
-
- if c.ourConn {
- closeErr := c.conn.Close()
- // A context timeout error takes precedence over this error.
- if err == nil && closeErr != nil {
- err = closeErr
- }
- }
- return err
-}
-
-var errShutdown = errors.New("the client is shutdown")
-
-// UploadTraces sends a batch of spans.
-//
-// Retryable errors from the server will be handled according to any
-// RetryConfig the client was created with.
-func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
- // Hold a read lock to ensure a shut down initiated after this starts does
- // not abandon the export. This read lock acquire has less priority than a
- // write lock acquire (i.e. Stop), meaning if the client is shutting down
- // this will come after the shut down.
- c.tscMu.RLock()
- defer c.tscMu.RUnlock()
-
- if c.tsc == nil {
- return errShutdown
- }
-
- ctx, cancel := c.exportContext(ctx)
- defer cancel()
-
- return c.requestFunc(ctx, func(iCtx context.Context) error {
- resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
- ResourceSpans: protoSpans,
- })
- if resp != nil && resp.PartialSuccess != nil {
- msg := resp.PartialSuccess.GetErrorMessage()
- n := resp.PartialSuccess.GetRejectedSpans()
- if n != 0 || msg != "" {
- err := internal.TracePartialSuccessError(n, msg)
- otel.Handle(err)
- }
- }
- // nil is converted to OK.
- if status.Code(err) == codes.OK {
- // Success.
- return nil
- }
- return err
- })
-}
-
-// exportContext returns a copy of parent with an appropriate deadline and
-// cancellation function.
-//
-// It is the callers responsibility to cancel the returned context once its
-// use is complete, via the parent or directly with the returned CancelFunc, to
-// ensure all resources are correctly released.
-func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
- var (
- ctx context.Context
- cancel context.CancelFunc
- )
-
- if c.exportTimeout > 0 {
- ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
- } else {
- ctx, cancel = context.WithCancel(parent)
- }
-
- if c.metadata.Len() > 0 {
- md := c.metadata
- if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
- md = metadata.Join(md, outMD)
- }
-
- ctx = metadata.NewOutgoingContext(ctx, md)
- }
-
- // Unify the client stopCtx with the parent.
- go func() {
- select {
- case <-ctx.Done():
- case <-c.stopCtx.Done():
- // Cancel the export as the shutdown has timed out.
- cancel()
- }
- }()
-
- return ctx, cancel
-}
-
-// retryable returns if err identifies a request that can be retried and a
-// duration to wait for if an explicit throttle time is included in err.
-func retryable(err error) (bool, time.Duration) {
- s := status.Convert(err)
- return retryableGRPCStatus(s)
-}
-
-func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
- switch s.Code() {
- case codes.Canceled,
- codes.DeadlineExceeded,
- codes.Aborted,
- codes.OutOfRange,
- codes.Unavailable,
- codes.DataLoss:
- // Additionally handle RetryInfo.
- _, d := throttleDelay(s)
- return true, d
- case codes.ResourceExhausted:
- // Retry only if the server signals that the recovery from resource exhaustion is possible.
- return throttleDelay(s)
- }
-
- // Not a retry-able error.
- return false, 0
-}
-
-// throttleDelay returns of the status is RetryInfo
-// and the its duration to wait for if an explicit throttle time.
-func throttleDelay(s *status.Status) (bool, time.Duration) {
- for _, detail := range s.Details() {
- if t, ok := detail.(*errdetails.RetryInfo); ok {
- return true, t.RetryDelay.AsDuration()
- }
- }
- return false, 0
-}
-
-// MarshalLog is the marshaling function used by the logging system to represent this Client.
-func (c *client) MarshalLog() interface{} {
- return struct {
- Type string
- Endpoint string
- }{
- Type: "otlptracegrpc",
- Endpoint: c.endpoint,
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
deleted file mode 100644
index b7bd429ff..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package otlptracegrpc provides an OTLP span exporter using gRPC.
-By default the telemetry is sent to https://localhost:4317.
-
-Exporter should be created using [New].
-
-The environment variables described below can be used for configuration.
-
-OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
-target to which the exporter sends telemetry.
-The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
-The value must contain a scheme ("http" or "https") and host.
-The value may additionally contain a port, and a path.
-The value should not contain a query string or fragment.
-OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
-The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
-
-OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") -
-setting "true" disables client transport security for the exporter's gRPC connection.
-You can use this only when an endpoint is provided without the http or https scheme.
-OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT setting overrides
-the scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT.
-OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
-The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
-
-OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
-key-value pairs used as gRPC metadata associated with gRPC requests.
-The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
-except that additional semi-colon delimited metadata is not supported.
-Example value: "key1=value1,key2=value2".
-OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
-The configuration can be overridden by [WithHeaders] option.
-
-OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
-maximum time in milliseconds the OTLP exporter waits for each batch export.
-OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
-The configuration can be overridden by [WithTimeout] option.
-
-OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
-the gRPC compressor the exporter uses.
-Supported value: "gzip".
-OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
-The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
-
-OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
-the filepath to the trusted certificate to use when verifying a server's TLS credentials.
-OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
-The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
-
-OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
-the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
-OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
-The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
-
-OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
-the filepath to the client's private key to use in mTLS communication in PEM format.
-OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
-The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.
-
-[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
-*/
-package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
deleted file mode 100644
index b826b8424..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
-)
-
-// New constructs a new Exporter and starts it.
-func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
- return otlptrace.New(ctx, NewClient(opts...))
-}
-
-// NewUnstarted constructs a new Exporter and does not start it.
-func NewUnstarted(opts ...Option) *otlptrace.Exporter {
- return otlptrace.NewUnstarted(NewClient(opts...))
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
deleted file mode 100644
index 4abf48d1f..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
-
-import (
- "crypto/tls"
- "crypto/x509"
- "errors"
- "fmt"
- "net/url"
- "strconv"
- "strings"
- "time"
- "unicode"
-
- "go.opentelemetry.io/otel/internal/global"
-)
-
-// ConfigFn is the generic function used to set a config.
-type ConfigFn func(*EnvOptionsReader)
-
-// EnvOptionsReader reads the required environment variables.
-type EnvOptionsReader struct {
- GetEnv func(string) string
- ReadFile func(string) ([]byte, error)
- Namespace string
-}
-
-// Apply runs every ConfigFn.
-func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
- for _, o := range opts {
- o(e)
- }
-}
-
-// GetEnvValue gets an OTLP environment variable value of the specified key
-// using the GetEnv function.
-// This function prepends the OTLP specified namespace to all key lookups.
-func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
- v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
- return v, v != ""
-}
-
-// WithString retrieves the specified config and passes it to ConfigFn as a string.
-func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
- return func(e *EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- fn(v)
- }
- }
-}
-
-// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
-func WithBool(n string, fn func(bool)) ConfigFn {
- return func(e *EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- b := strings.ToLower(v) == "true"
- fn(b)
- }
- }
-}
-
-// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
-func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
- return func(e *EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- d, err := strconv.Atoi(v)
- if err != nil {
- global.Error(err, "parse duration", "input", v)
- return
- }
- fn(time.Duration(d) * time.Millisecond)
- }
- }
-}
-
-// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
-func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
- return func(e *EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- fn(stringToHeader(v))
- }
- }
-}
-
-// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
-func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
- return func(e *EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- u, err := url.Parse(v)
- if err != nil {
- global.Error(err, "parse url", "input", v)
- return
- }
- fn(u)
- }
- }
-}
-
-// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
-func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
- return func(e *EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- b, err := e.ReadFile(v)
- if err != nil {
- global.Error(err, "read tls ca cert file", "file", v)
- return
- }
- c, err := createCertPool(b)
- if err != nil {
- global.Error(err, "create tls cert pool")
- return
- }
- fn(c)
- }
- }
-}
-
-// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn.
-func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
- return func(e *EnvOptionsReader) {
- vc, okc := e.GetEnvValue(nc)
- vk, okk := e.GetEnvValue(nk)
- if !okc || !okk {
- return
- }
- cert, err := e.ReadFile(vc)
- if err != nil {
- global.Error(err, "read tls client cert", "file", vc)
- return
- }
- key, err := e.ReadFile(vk)
- if err != nil {
- global.Error(err, "read tls client key", "file", vk)
- return
- }
- crt, err := tls.X509KeyPair(cert, key)
- if err != nil {
- global.Error(err, "create tls client key pair")
- return
- }
- fn(crt)
- }
-}
-
-func keyWithNamespace(ns, key string) string {
- if ns == "" {
- return key
- }
- return fmt.Sprintf("%s_%s", ns, key)
-}
-
-func stringToHeader(value string) map[string]string {
- headersPairs := strings.Split(value, ",")
- headers := make(map[string]string)
-
- for _, header := range headersPairs {
- n, v, found := strings.Cut(header, "=")
- if !found {
- global.Error(errors.New("missing '="), "parse headers", "input", header)
- continue
- }
-
- trimmedName := strings.TrimSpace(n)
-
- // Validate the key.
- if !isValidHeaderKey(trimmedName) {
- global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
- continue
- }
-
- // Only decode the value.
- value, err := url.PathUnescape(v)
- if err != nil {
- global.Error(err, "escape header value", "value", v)
- continue
- }
- trimmedValue := strings.TrimSpace(value)
-
- headers[trimmedName] = trimmedValue
- }
-
- return headers
-}
-
-func createCertPool(certBytes []byte) (*x509.CertPool, error) {
- cp := x509.NewCertPool()
- if ok := cp.AppendCertsFromPEM(certBytes); !ok {
- return nil, errors.New("failed to append certificate to the cert pool")
- }
- return cp, nil
-}
-
-func isValidHeaderKey(key string) bool {
- if key == "" {
- return false
- }
- for _, c := range key {
- if !isTokenChar(c) {
- return false
- }
- }
- return true
-}
-
-func isTokenChar(c rune) bool {
- return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
- unicode.IsDigit(c) ||
- c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
- c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
deleted file mode 100644
index 97cd6c54f..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
-
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
-
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
-
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
-
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go
-
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
deleted file mode 100644
index 7bb189a94..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
-
-import (
- "crypto/tls"
- "crypto/x509"
- "net/url"
- "os"
- "path"
- "strings"
- "time"
-
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
-)
-
-// DefaultEnvOptionsReader is the default environments reader.
-var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
- GetEnv: os.Getenv,
- ReadFile: os.ReadFile,
- Namespace: "OTEL_EXPORTER_OTLP",
-}
-
-// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
-func ApplyGRPCEnvConfigs(cfg Config) Config {
- opts := getOptionsFromEnv()
- for _, opt := range opts {
- cfg = opt.ApplyGRPCOption(cfg)
- }
- return cfg
-}
-
-// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
-func ApplyHTTPEnvConfigs(cfg Config) Config {
- opts := getOptionsFromEnv()
- for _, opt := range opts {
- cfg = opt.ApplyHTTPOption(cfg)
- }
- return cfg
-}
-
-func getOptionsFromEnv() []GenericOption {
- opts := []GenericOption{}
-
- tlsConf := &tls.Config{}
- DefaultEnvOptionsReader.Apply(
- envconfig.WithURL("ENDPOINT", func(u *url.URL) {
- opts = append(opts, withEndpointScheme(u))
- opts = append(opts, newSplitOption(func(cfg Config) Config {
- cfg.Traces.Endpoint = u.Host
- // For OTLP/HTTP endpoint URLs without a per-signal
- // configuration, the passed endpoint is used as a base URL
- // and the signals are sent to these paths relative to that.
- cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
- return cfg
- }, withEndpointForGRPC(u)))
- }),
- envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
- opts = append(opts, withEndpointScheme(u))
- opts = append(opts, newSplitOption(func(cfg Config) Config {
- cfg.Traces.Endpoint = u.Host
- // For endpoint URLs for OTLP/HTTP per-signal variables, the
- // URL MUST be used as-is without any modification. The only
- // exception is that if an URL contains no path part, the root
- // path / MUST be used.
- path := u.Path
- if path == "" {
- path = "/"
- }
- cfg.Traces.URLPath = path
- return cfg
- }, withEndpointForGRPC(u)))
- }),
- envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
- envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
- envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
- envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
- withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
- envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
- envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
- envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
- envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
- WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
- WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
- envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
- envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
- )
-
- return opts
-}
-
-func withEndpointScheme(u *url.URL) GenericOption {
- switch strings.ToLower(u.Scheme) {
- case "http", "unix":
- return WithInsecure()
- default:
- return WithSecure()
- }
-}
-
-func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
- return func(cfg Config) Config {
- // For OTLP/gRPC endpoints, this is the target to which the
- // exporter is going to send telemetry.
- cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
- return cfg
- }
-}
-
-// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
-func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
- return func(e *envconfig.EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- cp := NoCompression
- if v == "gzip" {
- cp = GzipCompression
- }
-
- fn(cp)
- }
- }
-}
-
-// revive:disable-next-line:flag-parameter
-func withInsecure(b bool) GenericOption {
- if b {
- return WithInsecure()
- }
- return WithSecure()
-}
-
-func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
- return func(e *envconfig.EnvOptionsReader) {
- if c.RootCAs != nil || len(c.Certificates) > 0 {
- fn(c)
- }
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
deleted file mode 100644
index 0a317d926..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
+++ /dev/null
@@ -1,351 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
-
-import (
- "crypto/tls"
- "fmt"
- "net/http"
- "net/url"
- "path"
- "strings"
- "time"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/backoff"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/credentials/insecure"
- "google.golang.org/grpc/encoding/gzip"
-
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
- "go.opentelemetry.io/otel/internal/global"
-)
-
-const (
- // DefaultTracesPath is a default URL path for endpoint that
- // receives spans.
- DefaultTracesPath string = "/v1/traces"
- // DefaultTimeout is a default max waiting time for the backend to process
- // each span batch.
- DefaultTimeout time.Duration = 10 * time.Second
-)
-
-type (
- // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
- // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client.
- HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
-
- SignalConfig struct {
- Endpoint string
- Insecure bool
- TLSCfg *tls.Config
- Headers map[string]string
- Compression Compression
- Timeout time.Duration
- URLPath string
-
- // gRPC configurations
- GRPCCredentials credentials.TransportCredentials
-
- Proxy HTTPTransportProxyFunc
- }
-
- Config struct {
- // Signal specific configurations
- Traces SignalConfig
-
- RetryConfig retry.Config
-
- // gRPC configurations
- ReconnectionPeriod time.Duration
- ServiceConfig string
- DialOptions []grpc.DialOption
- GRPCConn *grpc.ClientConn
- }
-)
-
-// NewHTTPConfig returns a new Config with all settings applied from opts and
-// any unset setting using the default HTTP config values.
-func NewHTTPConfig(opts ...HTTPOption) Config {
- cfg := Config{
- Traces: SignalConfig{
- Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
- URLPath: DefaultTracesPath,
- Compression: NoCompression,
- Timeout: DefaultTimeout,
- },
- RetryConfig: retry.DefaultConfig,
- }
- cfg = ApplyHTTPEnvConfigs(cfg)
- for _, opt := range opts {
- cfg = opt.ApplyHTTPOption(cfg)
- }
- cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath)
- return cfg
-}
-
-// cleanPath returns a path with all spaces trimmed and all redundancies
-// removed. If urlPath is empty or cleaning it results in an empty string,
-// defaultPath is returned instead.
-func cleanPath(urlPath string, defaultPath string) string {
- tmp := path.Clean(strings.TrimSpace(urlPath))
- if tmp == "." {
- return defaultPath
- }
- if !path.IsAbs(tmp) {
- tmp = "/" + tmp
- }
- return tmp
-}
-
-// NewGRPCConfig returns a new Config with all settings applied from opts and
-// any unset setting using the default gRPC config values.
-func NewGRPCConfig(opts ...GRPCOption) Config {
- userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
- cfg := Config{
- Traces: SignalConfig{
- Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
- URLPath: DefaultTracesPath,
- Compression: NoCompression,
- Timeout: DefaultTimeout,
- },
- RetryConfig: retry.DefaultConfig,
- DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
- }
- cfg = ApplyGRPCEnvConfigs(cfg)
- for _, opt := range opts {
- cfg = opt.ApplyGRPCOption(cfg)
- }
-
- if cfg.ServiceConfig != "" {
- cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
- }
- // Prioritize GRPCCredentials over Insecure (passing both is an error).
- if cfg.Traces.GRPCCredentials != nil {
- cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
- } else if cfg.Traces.Insecure {
- cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
- } else {
- // Default to using the host's root CA.
- creds := credentials.NewTLS(nil)
- cfg.Traces.GRPCCredentials = creds
- cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
- }
- if cfg.Traces.Compression == GzipCompression {
- cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
- }
- if cfg.ReconnectionPeriod != 0 {
- p := grpc.ConnectParams{
- Backoff: backoff.DefaultConfig,
- MinConnectTimeout: cfg.ReconnectionPeriod,
- }
- cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
- }
-
- return cfg
-}
-
-type (
- // GenericOption applies an option to the HTTP or gRPC driver.
- GenericOption interface {
- ApplyHTTPOption(Config) Config
- ApplyGRPCOption(Config) Config
-
- // A private method to prevent users implementing the
- // interface and so future additions to it will not
- // violate compatibility.
- private()
- }
-
- // HTTPOption applies an option to the HTTP driver.
- HTTPOption interface {
- ApplyHTTPOption(Config) Config
-
- // A private method to prevent users implementing the
- // interface and so future additions to it will not
- // violate compatibility.
- private()
- }
-
- // GRPCOption applies an option to the gRPC driver.
- GRPCOption interface {
- ApplyGRPCOption(Config) Config
-
- // A private method to prevent users implementing the
- // interface and so future additions to it will not
- // violate compatibility.
- private()
- }
-)
-
-// genericOption is an option that applies the same logic
-// for both gRPC and HTTP.
-type genericOption struct {
- fn func(Config) Config
-}
-
-func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
- return g.fn(cfg)
-}
-
-func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
- return g.fn(cfg)
-}
-
-func (genericOption) private() {}
-
-func newGenericOption(fn func(cfg Config) Config) GenericOption {
- return &genericOption{fn: fn}
-}
-
-// splitOption is an option that applies different logics
-// for gRPC and HTTP.
-type splitOption struct {
- httpFn func(Config) Config
- grpcFn func(Config) Config
-}
-
-func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
- return g.grpcFn(cfg)
-}
-
-func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
- return g.httpFn(cfg)
-}
-
-func (splitOption) private() {}
-
-func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
- return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
-}
-
-// httpOption is an option that is only applied to the HTTP driver.
-type httpOption struct {
- fn func(Config) Config
-}
-
-func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
- return h.fn(cfg)
-}
-
-func (httpOption) private() {}
-
-func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
- return &httpOption{fn: fn}
-}
-
-// grpcOption is an option that is only applied to the gRPC driver.
-type grpcOption struct {
- fn func(Config) Config
-}
-
-func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
- return h.fn(cfg)
-}
-
-func (grpcOption) private() {}
-
-func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
- return &grpcOption{fn: fn}
-}
-
-// Generic Options
-
-// WithEndpoint configures the trace host and port only; endpoint should
-// resemble "example.com" or "localhost:4317". To configure the scheme and path,
-// use WithEndpointURL.
-func WithEndpoint(endpoint string) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Endpoint = endpoint
- return cfg
- })
-}
-
-// WithEndpointURL configures the trace scheme, host, port, and path; the
-// provided value should resemble "https://example.com:4318/v1/traces".
-func WithEndpointURL(v string) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- u, err := url.Parse(v)
- if err != nil {
- global.Error(err, "otlptrace: parse endpoint url", "url", v)
- return cfg
- }
-
- cfg.Traces.Endpoint = u.Host
- cfg.Traces.URLPath = u.Path
- cfg.Traces.Insecure = u.Scheme != "https"
-
- return cfg
- })
-}
-
-func WithCompression(compression Compression) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Compression = compression
- return cfg
- })
-}
-
-func WithURLPath(urlPath string) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.URLPath = urlPath
- return cfg
- })
-}
-
-func WithRetry(rc retry.Config) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.RetryConfig = rc
- return cfg
- })
-}
-
-func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
- return newSplitOption(func(cfg Config) Config {
- cfg.Traces.TLSCfg = tlsCfg.Clone()
- return cfg
- }, func(cfg Config) Config {
- cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
- return cfg
- })
-}
-
-func WithInsecure() GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Insecure = true
- return cfg
- })
-}
-
-func WithSecure() GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Insecure = false
- return cfg
- })
-}
-
-func WithHeaders(headers map[string]string) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Headers = headers
- return cfg
- })
-}
-
-func WithTimeout(duration time.Duration) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Timeout = duration
- return cfg
- })
-}
-
-func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Proxy = pf
- return cfg
- })
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
deleted file mode 100644
index 3d4f699d4..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
-
-const (
- // DefaultCollectorGRPCPort is the default gRPC port of the collector.
- DefaultCollectorGRPCPort uint16 = 4317
- // DefaultCollectorHTTPPort is the default HTTP port of the collector.
- DefaultCollectorHTTPPort uint16 = 4318
- // DefaultCollectorHost is the host address the Exporter will attempt
- // connect to if no collector address is provided.
- DefaultCollectorHost string = "localhost"
-)
-
-// Compression describes the compression used for payloads sent to the
-// collector.
-type Compression int
-
-const (
- // NoCompression tells the driver to send payloads without
- // compression.
- NoCompression Compression = iota
- // GzipCompression tells the driver to send payloads after
- // compressing them with gzip.
- GzipCompression
-)
-
-// Marshaler describes the kind of message format sent to the collector.
-type Marshaler int
-
-const (
- // MarshalProto tells the driver to send using the protobuf binary format.
- MarshalProto Marshaler = iota
- // MarshalJSON tells the driver to send using json format.
- MarshalJSON
-)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
deleted file mode 100644
index 38b97a013..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
-
-import (
- "crypto/tls"
- "crypto/x509"
- "errors"
-)
-
-// CreateTLSConfig creates a tls.Config from a raw certificate bytes
-// to verify a server certificate.
-func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
- cp := x509.NewCertPool()
- if ok := cp.AppendCertsFromPEM(certBytes); !ok {
- return nil, errors.New("failed to append certificate to the cert pool")
- }
-
- return &tls.Config{
- RootCAs: cp,
- }, nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
deleted file mode 100644
index a12ea4c48..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/partialsuccess.go
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
-
-import "fmt"
-
-// PartialSuccess represents the underlying error for all handling
-// OTLP partial success messages. Use `errors.Is(err,
-// PartialSuccess{})` to test whether an error passed to the OTel
-// error handler belongs to this category.
-type PartialSuccess struct {
- ErrorMessage string
- RejectedItems int64
- RejectedKind string
-}
-
-var _ error = PartialSuccess{}
-
-// Error implements the error interface.
-func (ps PartialSuccess) Error() string {
- msg := ps.ErrorMessage
- if msg == "" {
- msg = "empty message"
- }
- return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
-}
-
-// Is supports the errors.Is() interface.
-func (ps PartialSuccess) Is(err error) bool {
- _, ok := err.(PartialSuccess)
- return ok
-}
-
-// TracePartialSuccessError returns an error describing a partial success
-// response for the trace signal.
-func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
- return PartialSuccess{
- ErrorMessage: errorMessage,
- RejectedItems: itemsRejected,
- RejectedKind: "spans",
- }
-}
-
-// MetricPartialSuccessError returns an error describing a partial success
-// response for the metric signal.
-func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
- return PartialSuccess{
- ErrorMessage: errorMessage,
- RejectedItems: itemsRejected,
- RejectedKind: "metric data points",
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
deleted file mode 100644
index 1c5450ab6..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/retry/retry.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package retry provides request retry functionality that can perform
-// configurable exponential backoff for transient errors and honor any
-// explicit throttle responses received.
-package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/cenkalti/backoff/v4"
-)
-
-// DefaultConfig are the recommended defaults to use.
-var DefaultConfig = Config{
- Enabled: true,
- InitialInterval: 5 * time.Second,
- MaxInterval: 30 * time.Second,
- MaxElapsedTime: time.Minute,
-}
-
-// Config defines configuration for retrying batches in case of export failure
-// using an exponential backoff.
-type Config struct {
- // Enabled indicates whether to not retry sending batches in case of
- // export failure.
- Enabled bool
- // InitialInterval the time to wait after the first failure before
- // retrying.
- InitialInterval time.Duration
- // MaxInterval is the upper bound on backoff interval. Once this value is
- // reached the delay between consecutive retries will always be
- // `MaxInterval`.
- MaxInterval time.Duration
- // MaxElapsedTime is the maximum amount of time (including retries) spent
- // trying to send a request/batch. Once this value is reached, the data
- // is discarded.
- MaxElapsedTime time.Duration
-}
-
-// RequestFunc wraps a request with retry logic.
-type RequestFunc func(context.Context, func(context.Context) error) error
-
-// EvaluateFunc returns if an error is retry-able and if an explicit throttle
-// duration should be honored that was included in the error.
-//
-// The function must return true if the error argument is retry-able,
-// otherwise it must return false for the first return parameter.
-//
-// The function must return a non-zero time.Duration if the error contains
-// explicit throttle duration that should be honored, otherwise it must return
-// a zero valued time.Duration.
-type EvaluateFunc func(error) (bool, time.Duration)
-
-// RequestFunc returns a RequestFunc using the evaluate function to determine
-// if requests can be retried and based on the exponential backoff
-// configuration of c.
-func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
- if !c.Enabled {
- return func(ctx context.Context, fn func(context.Context) error) error {
- return fn(ctx)
- }
- }
-
- return func(ctx context.Context, fn func(context.Context) error) error {
- // Do not use NewExponentialBackOff since it calls Reset and the code here
- // must call Reset after changing the InitialInterval (this saves an
- // unnecessary call to Now).
- b := &backoff.ExponentialBackOff{
- InitialInterval: c.InitialInterval,
- RandomizationFactor: backoff.DefaultRandomizationFactor,
- Multiplier: backoff.DefaultMultiplier,
- MaxInterval: c.MaxInterval,
- MaxElapsedTime: c.MaxElapsedTime,
- Stop: backoff.Stop,
- Clock: backoff.SystemClock,
- }
- b.Reset()
-
- for {
- err := fn(ctx)
- if err == nil {
- return nil
- }
-
- retryable, throttle := evaluate(err)
- if !retryable {
- return err
- }
-
- bOff := b.NextBackOff()
- if bOff == backoff.Stop {
- return fmt.Errorf("max retry time elapsed: %w", err)
- }
-
- // Wait for the greater of the backoff or throttle delay.
- var delay time.Duration
- if bOff > throttle {
- delay = bOff
- } else {
- elapsed := b.GetElapsedTime()
- if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
- return fmt.Errorf("max retry time would elapse: %w", err)
- }
- delay = throttle
- }
-
- if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
- return fmt.Errorf("%w: %w", ctxErr, err)
- }
- }
- }
-}
-
-// Allow override for testing.
-var waitFunc = wait
-
-// wait takes the caller's context, and the amount of time to wait. It will
-// return nil if the timer fires before or at the same time as the context's
-// deadline. This indicates that the call can be retried.
-func wait(ctx context.Context, delay time.Duration) error {
- timer := time.NewTimer(delay)
- defer timer.Stop()
-
- select {
- case <-ctx.Done():
- // Handle the case where the timer and context deadline end
- // simultaneously by prioritizing the timer expiration nil value
- // response.
- select {
- case <-timer.C:
- default:
- return ctx.Err()
- }
- case <-timer.C:
- }
-
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
deleted file mode 100644
index 00ab1f20c..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
-
-import (
- "fmt"
- "time"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
-)
-
-// Option applies an option to the gRPC driver.
-type Option interface {
- applyGRPCOption(otlpconfig.Config) otlpconfig.Config
-}
-
-func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
- converted := make([]otlpconfig.GRPCOption, len(opts))
- for i, o := range opts {
- converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
- }
- return converted
-}
-
-// RetryConfig defines configuration for retrying export of span batches that
-// failed to be received by the target endpoint.
-//
-// This configuration does not define any network retry strategy. That is
-// entirely handled by the gRPC ClientConn.
-type RetryConfig retry.Config
-
-type wrappedOption struct {
- otlpconfig.GRPCOption
-}
-
-func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
- return w.ApplyGRPCOption(cfg)
-}
-
-// WithInsecure disables client transport security for the exporter's gRPC
-// connection just like grpc.WithInsecure()
-// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by
-// default, client security is required unless WithInsecure is used.
-//
-// This option has no effect if WithGRPCConn is used.
-func WithInsecure() Option {
- return wrappedOption{otlpconfig.WithInsecure()}
-}
-
-// WithEndpoint sets the target endpoint (host and port) the Exporter will
-// connect to. The provided endpoint should resemble "example.com:4317" (no
-// scheme or path).
-//
-// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
-// environment variable is set, and this option is not passed, that variable
-// value will be used. If both environment variables are set,
-// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
-// variable is set, and this option is passed, this option will take precedence.
-//
-// If both this option and WithEndpointURL are used, the last used option will
-// take precedence.
-//
-// By default, if an environment variable is not set, and this option is not
-// passed, "localhost:4317" will be used.
-//
-// This option has no effect if WithGRPCConn is used.
-func WithEndpoint(endpoint string) Option {
- return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
-}
-
-// WithEndpointURL sets the target endpoint URL (scheme, host, port, path)
-// the Exporter will connect to. The provided endpoint URL should resemble
-// "https://example.com:4318/v1/traces".
-//
-// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
-// environment variable is set, and this option is not passed, that variable
-// value will be used. If both environment variables are set,
-// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
-// variable is set, and this option is passed, this option will take precedence.
-//
-// If both this option and WithEndpoint are used, the last used option will
-// take precedence.
-//
-// If an invalid URL is provided, the default value will be kept.
-//
-// By default, if an environment variable is not set, and this option is not
-// passed, "https://localhost:4317/v1/traces" will be used.
-//
-// This option has no effect if WithGRPCConn is used.
-func WithEndpointURL(u string) Option {
- return wrappedOption{otlpconfig.WithEndpointURL(u)}
-}
-
-// WithReconnectionPeriod set the minimum amount of time between connection
-// attempts to the target endpoint.
-//
-// This option has no effect if WithGRPCConn is used.
-func WithReconnectionPeriod(rp time.Duration) Option {
- return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
- cfg.ReconnectionPeriod = rp
- return cfg
- })}
-}
-
-func compressorToCompression(compressor string) otlpconfig.Compression {
- if compressor == "gzip" {
- return otlpconfig.GzipCompression
- }
-
- otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
- return otlpconfig.NoCompression
-}
-
-// WithCompressor sets the compressor for the gRPC client to use when sending
-// requests. Supported compressor values: "gzip".
-func WithCompressor(compressor string) Option {
- return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
-}
-
-// WithHeaders will send the provided headers with each gRPC requests.
-func WithHeaders(headers map[string]string) Option {
- return wrappedOption{otlpconfig.WithHeaders(headers)}
-}
-
-// WithTLSCredentials allows the connection to use TLS credentials when
-// talking to the server. It takes in grpc.TransportCredentials instead of say
-// a Certificate file or a tls.Certificate, because the retrieving of these
-// credentials can be done in many ways e.g. plain file, in code tls.Config or
-// by certificate rotation, so it is up to the caller to decide what to use.
-//
-// This option has no effect if WithGRPCConn is used.
-func WithTLSCredentials(creds credentials.TransportCredentials) Option {
- return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
- cfg.Traces.GRPCCredentials = creds
- return cfg
- })}
-}
-
-// WithServiceConfig defines the default gRPC service config used.
-//
-// This option has no effect if WithGRPCConn is used.
-func WithServiceConfig(serviceConfig string) Option {
- return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
- cfg.ServiceConfig = serviceConfig
- return cfg
- })}
-}
-
-// WithDialOption sets explicit grpc.DialOptions to use when making a
-// connection. The options here are appended to the internal grpc.DialOptions
-// used so they will take precedence over any other internal grpc.DialOptions
-// they might conflict with.
-// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError]
-// grpc.DialOptions are ignored.
-//
-// This option has no effect if WithGRPCConn is used.
-func WithDialOption(opts ...grpc.DialOption) Option {
- return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
- cfg.DialOptions = opts
- return cfg
- })}
-}
-
-// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
-//
-// This option takes precedence over any other option that relates to
-// establishing or persisting a gRPC connection to a target endpoint. Any
-// other option of those types passed will be ignored.
-//
-// It is the callers responsibility to close the passed conn. The client
-// Shutdown method will not close this connection.
-func WithGRPCConn(conn *grpc.ClientConn) Option {
- return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
- cfg.GRPCConn = conn
- return cfg
- })}
-}
-
-// WithTimeout sets the max amount of time a client will attempt to export a
-// batch of spans. This takes precedence over any retry settings defined with
-// WithRetry, once this time limit has been reached the export is abandoned
-// and the batch of spans is dropped.
-//
-// If unset, the default timeout will be set to 10 seconds.
-func WithTimeout(duration time.Duration) Option {
- return wrappedOption{otlpconfig.WithTimeout(duration)}
-}
-
-// WithRetry sets the retry policy for transient retryable errors that may be
-// returned by the target endpoint when exporting a batch of spans.
-//
-// If the target endpoint responds with not only a retryable error, but
-// explicitly returns a backoff time in the response. That time will take
-// precedence over these settings.
-//
-// These settings do not define any network retry strategy. That is entirely
-// handled by the gRPC ClientConn.
-//
-// If unset, the default retry policy will be used. It will retry the export
-// 5 seconds after receiving a retryable error and increase exponentially
-// after each error for no more than a total time of 1 minute.
-func WithRetry(settings RetryConfig) Option {
- return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/README.md
deleted file mode 100644
index 365264009..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# OTLP Trace HTTP Exporter
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
deleted file mode 100644
index 16c006b2c..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
+++ /dev/null
@@ -1,392 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
-
-import (
- "bytes"
- "compress/gzip"
- "context"
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "google.golang.org/protobuf/proto"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
- coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
- tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
-)
-
-const contentTypeProto = "application/x-protobuf"
-
-var gzPool = sync.Pool{
- New: func() interface{} {
- w := gzip.NewWriter(io.Discard)
- return w
- },
-}
-
-// Keep it in sync with golang's DefaultTransport from net/http! We
-// have our own copy to avoid handling a situation where the
-// DefaultTransport is overwritten with some different implementation
-// of http.RoundTripper or it's modified by other package.
-var ourTransport = &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).DialContext,
- ForceAttemptHTTP2: true,
- MaxIdleConns: 100,
- IdleConnTimeout: 90 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
-}
-
-type client struct {
- name string
- cfg otlpconfig.SignalConfig
- generalCfg otlpconfig.Config
- requestFunc retry.RequestFunc
- client *http.Client
- stopCh chan struct{}
- stopOnce sync.Once
-}
-
-var _ otlptrace.Client = (*client)(nil)
-
-// NewClient creates a new HTTP trace client.
-func NewClient(opts ...Option) otlptrace.Client {
- cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...)
-
- httpClient := &http.Client{
- Transport: ourTransport,
- Timeout: cfg.Traces.Timeout,
- }
-
- if cfg.Traces.TLSCfg != nil || cfg.Traces.Proxy != nil {
- clonedTransport := ourTransport.Clone()
- httpClient.Transport = clonedTransport
-
- if cfg.Traces.TLSCfg != nil {
- clonedTransport.TLSClientConfig = cfg.Traces.TLSCfg
- }
- if cfg.Traces.Proxy != nil {
- clonedTransport.Proxy = cfg.Traces.Proxy
- }
- }
-
- stopCh := make(chan struct{})
- return &client{
- name: "traces",
- cfg: cfg.Traces,
- generalCfg: cfg,
- requestFunc: cfg.RetryConfig.RequestFunc(evaluate),
- stopCh: stopCh,
- client: httpClient,
- }
-}
-
-// Start does nothing in a HTTP client.
-func (d *client) Start(ctx context.Context) error {
- // nothing to do
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- return nil
-}
-
-// Stop shuts down the client and interrupt any in-flight request.
-func (d *client) Stop(ctx context.Context) error {
- d.stopOnce.Do(func() {
- close(d.stopCh)
- })
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- return nil
-}
-
-// UploadTraces sends a batch of spans to the collector.
-func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
- pbRequest := &coltracepb.ExportTraceServiceRequest{
- ResourceSpans: protoSpans,
- }
- rawRequest, err := proto.Marshal(pbRequest)
- if err != nil {
- return err
- }
-
- ctx, cancel := d.contextWithStop(ctx)
- defer cancel()
-
- request, err := d.newRequest(rawRequest)
- if err != nil {
- return err
- }
-
- return d.requestFunc(ctx, func(ctx context.Context) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- request.reset(ctx)
- resp, err := d.client.Do(request.Request)
- var urlErr *url.Error
- if errors.As(err, &urlErr) && urlErr.Temporary() {
- return newResponseError(http.Header{}, err)
- }
- if err != nil {
- return err
- }
-
- if resp != nil && resp.Body != nil {
- defer func() {
- if err := resp.Body.Close(); err != nil {
- otel.Handle(err)
- }
- }()
- }
-
- if sc := resp.StatusCode; sc >= 200 && sc <= 299 {
- // Success, do not retry.
- // Read the partial success message, if any.
- var respData bytes.Buffer
- if _, err := io.Copy(&respData, resp.Body); err != nil {
- return err
- }
- if respData.Len() == 0 {
- return nil
- }
-
- if resp.Header.Get("Content-Type") == "application/x-protobuf" {
- var respProto coltracepb.ExportTraceServiceResponse
- if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
- return err
- }
-
- if respProto.PartialSuccess != nil {
- msg := respProto.PartialSuccess.GetErrorMessage()
- n := respProto.PartialSuccess.GetRejectedSpans()
- if n != 0 || msg != "" {
- err := internal.TracePartialSuccessError(n, msg)
- otel.Handle(err)
- }
- }
- }
- return nil
- }
- // Error cases.
-
- // server may return a message with the response
- // body, so we read it to include in the error
- // message to be returned. It will help in
- // debugging the actual issue.
- var respData bytes.Buffer
- if _, err := io.Copy(&respData, resp.Body); err != nil {
- return err
- }
- respStr := strings.TrimSpace(respData.String())
- if len(respStr) == 0 {
- respStr = "(empty)"
- }
- bodyErr := fmt.Errorf("body: %s", respStr)
-
- switch resp.StatusCode {
- case http.StatusTooManyRequests,
- http.StatusBadGateway,
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout:
- // Retryable failure.
- return newResponseError(resp.Header, bodyErr)
- default:
- // Non-retryable failure.
- return fmt.Errorf("failed to send to %s: %s (%w)", request.URL, resp.Status, bodyErr)
- }
- })
-}
-
-func (d *client) newRequest(body []byte) (request, error) {
- u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath}
- r, err := http.NewRequest(http.MethodPost, u.String(), nil)
- if err != nil {
- return request{Request: r}, err
- }
-
- userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
- r.Header.Set("User-Agent", userAgent)
-
- for k, v := range d.cfg.Headers {
- r.Header.Set(k, v)
- }
- r.Header.Set("Content-Type", contentTypeProto)
-
- req := request{Request: r}
- switch Compression(d.cfg.Compression) {
- case NoCompression:
- r.ContentLength = (int64)(len(body))
- req.bodyReader = bodyReader(body)
- case GzipCompression:
- // Ensure the content length is not used.
- r.ContentLength = -1
- r.Header.Set("Content-Encoding", "gzip")
-
- gz := gzPool.Get().(*gzip.Writer)
- defer gzPool.Put(gz)
-
- var b bytes.Buffer
- gz.Reset(&b)
-
- if _, err := gz.Write(body); err != nil {
- return req, err
- }
- // Close needs to be called to ensure body is fully written.
- if err := gz.Close(); err != nil {
- return req, err
- }
-
- req.bodyReader = bodyReader(b.Bytes())
- }
-
- return req, nil
-}
-
-// MarshalLog is the marshaling function used by the logging system to represent this Client.
-func (d *client) MarshalLog() interface{} {
- return struct {
- Type string
- Endpoint string
- Insecure bool
- }{
- Type: "otlptracehttp",
- Endpoint: d.cfg.Endpoint,
- Insecure: d.cfg.Insecure,
- }
-}
-
-// bodyReader returns a closure returning a new reader for buf.
-func bodyReader(buf []byte) func() io.ReadCloser {
- return func() io.ReadCloser {
- return io.NopCloser(bytes.NewReader(buf))
- }
-}
-
-// request wraps an http.Request with a resettable body reader.
-type request struct {
- *http.Request
-
- // bodyReader allows the same body to be used for multiple requests.
- bodyReader func() io.ReadCloser
-}
-
-// reset reinitializes the request Body and uses ctx for the request.
-func (r *request) reset(ctx context.Context) {
- r.Body = r.bodyReader()
- r.Request = r.Request.WithContext(ctx)
-}
-
-// retryableError represents a request failure that can be retried.
-type retryableError struct {
- throttle int64
- err error
-}
-
-// newResponseError returns a retryableError and will extract any explicit
-// throttle delay contained in headers. The returned error wraps wrapped
-// if it is not nil.
-func newResponseError(header http.Header, wrapped error) error {
- var rErr retryableError
- if s, ok := header["Retry-After"]; ok {
- if t, err := strconv.ParseInt(s[0], 10, 64); err == nil {
- rErr.throttle = t
- }
- }
-
- rErr.err = wrapped
- return rErr
-}
-
-func (e retryableError) Error() string {
- if e.err != nil {
- return "retry-able request failure: " + e.err.Error()
- }
-
- return "retry-able request failure"
-}
-
-func (e retryableError) Unwrap() error {
- return e.err
-}
-
-func (e retryableError) As(target interface{}) bool {
- if e.err == nil {
- return false
- }
-
- switch v := target.(type) {
- case **retryableError:
- *v = &e
- return true
- default:
- return false
- }
-}
-
-// evaluate returns if err is retry-able. If it is and it includes an explicit
-// throttling delay, that delay is also returned.
-func evaluate(err error) (bool, time.Duration) {
- if err == nil {
- return false, 0
- }
-
- // Do not use errors.As here, this should only be flattened one layer. If
- // there are several chained errors, all the errors above it will be
- // discarded if errors.As is used instead.
- rErr, ok := err.(retryableError) //nolint:errorlint
- if !ok {
- return false, 0
- }
-
- return true, time.Duration(rErr.throttle)
-}
-
-func (d *client) getScheme() string {
- if d.cfg.Insecure {
- return "http"
- }
- return "https"
-}
-
-func (d *client) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) {
- // Unify the parent context Done signal with the client's stop
- // channel.
- ctx, cancel := context.WithCancel(ctx)
- go func(ctx context.Context, cancel context.CancelFunc) {
- select {
- case <-ctx.Done():
- // Nothing to do, either cancelled or deadline
- // happened.
- case <-d.stopCh:
- cancel()
- }
- }(ctx, cancel)
- return ctx, cancel
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
deleted file mode 100644
index 9fea75ad1..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package otlptracehttp provides an OTLP span exporter using HTTP with protobuf payloads.
-By default the telemetry is sent to https://localhost:4318/v1/traces.
-
-Exporter should be created using [New].
-
-The environment variables described below can be used for configuration.
-
-OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") -
-target base URL ("/v1/traces" is appended) to which the exporter sends telemetry.
-The value must contain a scheme ("http" or "https") and host.
-The value may additionally contain a port and a path.
-The value should not contain a query string or fragment.
-The configuration can be overridden by OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
-environment variable and by [WithEndpoint], [WithEndpointURL], [WithInsecure] options.
-
-OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4318/v1/traces") -
-target URL to which the exporter sends telemetry.
-The value must contain a scheme ("http" or "https") and host.
-The value may additionally contain a port and a path.
-The value should not contain a query string or fragment.
-The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options.
-
-OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
-key-value pairs used as headers associated with HTTP requests.
-The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
-except that additional semi-colon delimited metadata is not supported.
-Example value: "key1=value1,key2=value2".
-OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
-The configuration can be overridden by [WithHeaders] option.
-
-OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
-maximum time in milliseconds the OTLP exporter waits for each batch export.
-OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
-The configuration can be overridden by [WithTimeout] option.
-
-OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
-the compression strategy the exporter uses to compress the HTTP body.
-Supported value: "gzip".
-OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
-The configuration can be overridden by [WithCompression] option.
-
-OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
-the filepath to the trusted certificate to use when verifying a server's TLS credentials.
-OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
-The configuration can be overridden by [WithTLSClientConfig] option.
-
-OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
-the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
-OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
-The configuration can be overridden by [WithTLSClientConfig] option.
-
-OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
-the filepath to the client's private key to use in mTLS communication in PEM format.
-OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
-The configuration can be overridden by [WithTLSClientConfig] option.
-
-[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
-*/
-package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go
deleted file mode 100644
index fae89ea4f..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
-)
-
-// New constructs a new Exporter and starts it.
-func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
- return otlptrace.New(ctx, NewClient(opts...))
-}
-
-// NewUnstarted constructs a new Exporter and does not start it.
-func NewUnstarted(opts ...Option) *otlptrace.Exporter {
- return otlptrace.NewUnstarted(NewClient(opts...))
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
deleted file mode 100644
index f30bb66ae..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig"
-
-import (
- "crypto/tls"
- "crypto/x509"
- "errors"
- "fmt"
- "net/url"
- "strconv"
- "strings"
- "time"
- "unicode"
-
- "go.opentelemetry.io/otel/internal/global"
-)
-
-// ConfigFn is the generic function used to set a config.
-type ConfigFn func(*EnvOptionsReader)
-
-// EnvOptionsReader reads the required environment variables.
-type EnvOptionsReader struct {
- GetEnv func(string) string
- ReadFile func(string) ([]byte, error)
- Namespace string
-}
-
-// Apply runs every ConfigFn.
-func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
- for _, o := range opts {
- o(e)
- }
-}
-
-// GetEnvValue gets an OTLP environment variable value of the specified key
-// using the GetEnv function.
-// This function prepends the OTLP specified namespace to all key lookups.
-func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
- v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
- return v, v != ""
-}
-
-// WithString retrieves the specified config and passes it to ConfigFn as a string.
-func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
- return func(e *EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- fn(v)
- }
- }
-}
-
-// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
-func WithBool(n string, fn func(bool)) ConfigFn {
- return func(e *EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- b := strings.ToLower(v) == "true"
- fn(b)
- }
- }
-}
-
-// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
-func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
- return func(e *EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- d, err := strconv.Atoi(v)
- if err != nil {
- global.Error(err, "parse duration", "input", v)
- return
- }
- fn(time.Duration(d) * time.Millisecond)
- }
- }
-}
-
-// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
-func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
- return func(e *EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- fn(stringToHeader(v))
- }
- }
-}
-
-// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
-func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
- return func(e *EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- u, err := url.Parse(v)
- if err != nil {
- global.Error(err, "parse url", "input", v)
- return
- }
- fn(u)
- }
- }
-}
-
-// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
-func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
- return func(e *EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- b, err := e.ReadFile(v)
- if err != nil {
- global.Error(err, "read tls ca cert file", "file", v)
- return
- }
- c, err := createCertPool(b)
- if err != nil {
- global.Error(err, "create tls cert pool")
- return
- }
- fn(c)
- }
- }
-}
-
-// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn.
-func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
- return func(e *EnvOptionsReader) {
- vc, okc := e.GetEnvValue(nc)
- vk, okk := e.GetEnvValue(nk)
- if !okc || !okk {
- return
- }
- cert, err := e.ReadFile(vc)
- if err != nil {
- global.Error(err, "read tls client cert", "file", vc)
- return
- }
- key, err := e.ReadFile(vk)
- if err != nil {
- global.Error(err, "read tls client key", "file", vk)
- return
- }
- crt, err := tls.X509KeyPair(cert, key)
- if err != nil {
- global.Error(err, "create tls client key pair")
- return
- }
- fn(crt)
- }
-}
-
-func keyWithNamespace(ns, key string) string {
- if ns == "" {
- return key
- }
- return fmt.Sprintf("%s_%s", ns, key)
-}
-
-func stringToHeader(value string) map[string]string {
- headersPairs := strings.Split(value, ",")
- headers := make(map[string]string)
-
- for _, header := range headersPairs {
- n, v, found := strings.Cut(header, "=")
- if !found {
- global.Error(errors.New("missing '="), "parse headers", "input", header)
- continue
- }
-
- trimmedName := strings.TrimSpace(n)
-
- // Validate the key.
- if !isValidHeaderKey(trimmedName) {
- global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
- continue
- }
-
- // Only decode the value.
- value, err := url.PathUnescape(v)
- if err != nil {
- global.Error(err, "escape header value", "value", v)
- continue
- }
- trimmedValue := strings.TrimSpace(value)
-
- headers[trimmedName] = trimmedValue
- }
-
- return headers
-}
-
-func createCertPool(certBytes []byte) (*x509.CertPool, error) {
- cp := x509.NewCertPool()
- if ok := cp.AppendCertsFromPEM(certBytes); !ok {
- return nil, errors.New("failed to append certificate to the cert pool")
- }
- return cp, nil
-}
-
-func isValidHeaderKey(key string) bool {
- if key == "" {
- return false
- }
- for _, c := range key {
- if !isTokenChar(c) {
- return false
- }
- }
- return true
-}
-
-func isTokenChar(c rune) bool {
- return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
- unicode.IsDigit(c) ||
- c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
- c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go
deleted file mode 100644
index e4142b9d7..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
-
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
-
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
-
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
-
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig\"}" --out=otlpconfig/envconfig.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry\"}" --out=otlpconfig/options.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig\"}" --out=otlpconfig/options_test.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go
-
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go
deleted file mode 100644
index ff4141b6d..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
-
-import (
- "crypto/tls"
- "crypto/x509"
- "net/url"
- "os"
- "path"
- "strings"
- "time"
-
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig"
-)
-
-// DefaultEnvOptionsReader is the default environments reader.
-var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
- GetEnv: os.Getenv,
- ReadFile: os.ReadFile,
- Namespace: "OTEL_EXPORTER_OTLP",
-}
-
-// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
-func ApplyGRPCEnvConfigs(cfg Config) Config {
- opts := getOptionsFromEnv()
- for _, opt := range opts {
- cfg = opt.ApplyGRPCOption(cfg)
- }
- return cfg
-}
-
-// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
-func ApplyHTTPEnvConfigs(cfg Config) Config {
- opts := getOptionsFromEnv()
- for _, opt := range opts {
- cfg = opt.ApplyHTTPOption(cfg)
- }
- return cfg
-}
-
-func getOptionsFromEnv() []GenericOption {
- opts := []GenericOption{}
-
- tlsConf := &tls.Config{}
- DefaultEnvOptionsReader.Apply(
- envconfig.WithURL("ENDPOINT", func(u *url.URL) {
- opts = append(opts, withEndpointScheme(u))
- opts = append(opts, newSplitOption(func(cfg Config) Config {
- cfg.Traces.Endpoint = u.Host
- // For OTLP/HTTP endpoint URLs without a per-signal
- // configuration, the passed endpoint is used as a base URL
- // and the signals are sent to these paths relative to that.
- cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
- return cfg
- }, withEndpointForGRPC(u)))
- }),
- envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
- opts = append(opts, withEndpointScheme(u))
- opts = append(opts, newSplitOption(func(cfg Config) Config {
- cfg.Traces.Endpoint = u.Host
- // For endpoint URLs for OTLP/HTTP per-signal variables, the
- // URL MUST be used as-is without any modification. The only
- // exception is that if an URL contains no path part, the root
- // path / MUST be used.
- path := u.Path
- if path == "" {
- path = "/"
- }
- cfg.Traces.URLPath = path
- return cfg
- }, withEndpointForGRPC(u)))
- }),
- envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
- envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
- envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
- envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
- withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
- envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
- envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
- envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
- envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
- WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
- WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
- envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
- envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
- )
-
- return opts
-}
-
-func withEndpointScheme(u *url.URL) GenericOption {
- switch strings.ToLower(u.Scheme) {
- case "http", "unix":
- return WithInsecure()
- default:
- return WithSecure()
- }
-}
-
-func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
- return func(cfg Config) Config {
- // For OTLP/gRPC endpoints, this is the target to which the
- // exporter is going to send telemetry.
- cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
- return cfg
- }
-}
-
-// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
-func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
- return func(e *envconfig.EnvOptionsReader) {
- if v, ok := e.GetEnvValue(n); ok {
- cp := NoCompression
- if v == "gzip" {
- cp = GzipCompression
- }
-
- fn(cp)
- }
- }
-}
-
-// revive:disable-next-line:flag-parameter
-func withInsecure(b bool) GenericOption {
- if b {
- return WithInsecure()
- }
- return WithSecure()
-}
-
-func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
- return func(e *envconfig.EnvOptionsReader) {
- if c.RootCAs != nil || len(c.Certificates) > 0 {
- fn(c)
- }
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
deleted file mode 100644
index 6a9c4d3a6..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
+++ /dev/null
@@ -1,351 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
-
-import (
- "crypto/tls"
- "fmt"
- "net/http"
- "net/url"
- "path"
- "strings"
- "time"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/backoff"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/credentials/insecure"
- "google.golang.org/grpc/encoding/gzip"
-
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
- "go.opentelemetry.io/otel/internal/global"
-)
-
-const (
- // DefaultTracesPath is a default URL path for endpoint that
- // receives spans.
- DefaultTracesPath string = "/v1/traces"
- // DefaultTimeout is a default max waiting time for the backend to process
- // each span batch.
- DefaultTimeout time.Duration = 10 * time.Second
-)
-
-type (
- // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
- // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client.
- HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
-
- SignalConfig struct {
- Endpoint string
- Insecure bool
- TLSCfg *tls.Config
- Headers map[string]string
- Compression Compression
- Timeout time.Duration
- URLPath string
-
- // gRPC configurations
- GRPCCredentials credentials.TransportCredentials
-
- Proxy HTTPTransportProxyFunc
- }
-
- Config struct {
- // Signal specific configurations
- Traces SignalConfig
-
- RetryConfig retry.Config
-
- // gRPC configurations
- ReconnectionPeriod time.Duration
- ServiceConfig string
- DialOptions []grpc.DialOption
- GRPCConn *grpc.ClientConn
- }
-)
-
-// NewHTTPConfig returns a new Config with all settings applied from opts and
-// any unset setting using the default HTTP config values.
-func NewHTTPConfig(opts ...HTTPOption) Config {
- cfg := Config{
- Traces: SignalConfig{
- Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
- URLPath: DefaultTracesPath,
- Compression: NoCompression,
- Timeout: DefaultTimeout,
- },
- RetryConfig: retry.DefaultConfig,
- }
- cfg = ApplyHTTPEnvConfigs(cfg)
- for _, opt := range opts {
- cfg = opt.ApplyHTTPOption(cfg)
- }
- cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath)
- return cfg
-}
-
-// cleanPath returns a path with all spaces trimmed and all redundancies
-// removed. If urlPath is empty or cleaning it results in an empty string,
-// defaultPath is returned instead.
-func cleanPath(urlPath string, defaultPath string) string {
- tmp := path.Clean(strings.TrimSpace(urlPath))
- if tmp == "." {
- return defaultPath
- }
- if !path.IsAbs(tmp) {
- tmp = "/" + tmp
- }
- return tmp
-}
-
-// NewGRPCConfig returns a new Config with all settings applied from opts and
-// any unset setting using the default gRPC config values.
-func NewGRPCConfig(opts ...GRPCOption) Config {
- userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
- cfg := Config{
- Traces: SignalConfig{
- Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
- URLPath: DefaultTracesPath,
- Compression: NoCompression,
- Timeout: DefaultTimeout,
- },
- RetryConfig: retry.DefaultConfig,
- DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
- }
- cfg = ApplyGRPCEnvConfigs(cfg)
- for _, opt := range opts {
- cfg = opt.ApplyGRPCOption(cfg)
- }
-
- if cfg.ServiceConfig != "" {
- cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
- }
- // Prioritize GRPCCredentials over Insecure (passing both is an error).
- if cfg.Traces.GRPCCredentials != nil {
- cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
- } else if cfg.Traces.Insecure {
- cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
- } else {
- // Default to using the host's root CA.
- creds := credentials.NewTLS(nil)
- cfg.Traces.GRPCCredentials = creds
- cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
- }
- if cfg.Traces.Compression == GzipCompression {
- cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
- }
- if cfg.ReconnectionPeriod != 0 {
- p := grpc.ConnectParams{
- Backoff: backoff.DefaultConfig,
- MinConnectTimeout: cfg.ReconnectionPeriod,
- }
- cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
- }
-
- return cfg
-}
-
-type (
- // GenericOption applies an option to the HTTP or gRPC driver.
- GenericOption interface {
- ApplyHTTPOption(Config) Config
- ApplyGRPCOption(Config) Config
-
- // A private method to prevent users implementing the
- // interface and so future additions to it will not
- // violate compatibility.
- private()
- }
-
- // HTTPOption applies an option to the HTTP driver.
- HTTPOption interface {
- ApplyHTTPOption(Config) Config
-
- // A private method to prevent users implementing the
- // interface and so future additions to it will not
- // violate compatibility.
- private()
- }
-
- // GRPCOption applies an option to the gRPC driver.
- GRPCOption interface {
- ApplyGRPCOption(Config) Config
-
- // A private method to prevent users implementing the
- // interface and so future additions to it will not
- // violate compatibility.
- private()
- }
-)
-
-// genericOption is an option that applies the same logic
-// for both gRPC and HTTP.
-type genericOption struct {
- fn func(Config) Config
-}
-
-func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
- return g.fn(cfg)
-}
-
-func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
- return g.fn(cfg)
-}
-
-func (genericOption) private() {}
-
-func newGenericOption(fn func(cfg Config) Config) GenericOption {
- return &genericOption{fn: fn}
-}
-
-// splitOption is an option that applies different logics
-// for gRPC and HTTP.
-type splitOption struct {
- httpFn func(Config) Config
- grpcFn func(Config) Config
-}
-
-func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
- return g.grpcFn(cfg)
-}
-
-func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
- return g.httpFn(cfg)
-}
-
-func (splitOption) private() {}
-
-func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
- return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
-}
-
-// httpOption is an option that is only applied to the HTTP driver.
-type httpOption struct {
- fn func(Config) Config
-}
-
-func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
- return h.fn(cfg)
-}
-
-func (httpOption) private() {}
-
-func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
- return &httpOption{fn: fn}
-}
-
-// grpcOption is an option that is only applied to the gRPC driver.
-type grpcOption struct {
- fn func(Config) Config
-}
-
-func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
- return h.fn(cfg)
-}
-
-func (grpcOption) private() {}
-
-func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
- return &grpcOption{fn: fn}
-}
-
-// Generic Options
-
-// WithEndpoint configures the trace host and port only; endpoint should
-// resemble "example.com" or "localhost:4317". To configure the scheme and path,
-// use WithEndpointURL.
-func WithEndpoint(endpoint string) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Endpoint = endpoint
- return cfg
- })
-}
-
-// WithEndpointURL configures the trace scheme, host, port, and path; the
-// provided value should resemble "https://example.com:4318/v1/traces".
-func WithEndpointURL(v string) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- u, err := url.Parse(v)
- if err != nil {
- global.Error(err, "otlptrace: parse endpoint url", "url", v)
- return cfg
- }
-
- cfg.Traces.Endpoint = u.Host
- cfg.Traces.URLPath = u.Path
- cfg.Traces.Insecure = u.Scheme != "https"
-
- return cfg
- })
-}
-
-func WithCompression(compression Compression) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Compression = compression
- return cfg
- })
-}
-
-func WithURLPath(urlPath string) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.URLPath = urlPath
- return cfg
- })
-}
-
-func WithRetry(rc retry.Config) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.RetryConfig = rc
- return cfg
- })
-}
-
-func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
- return newSplitOption(func(cfg Config) Config {
- cfg.Traces.TLSCfg = tlsCfg.Clone()
- return cfg
- }, func(cfg Config) Config {
- cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
- return cfg
- })
-}
-
-func WithInsecure() GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Insecure = true
- return cfg
- })
-}
-
-func WithSecure() GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Insecure = false
- return cfg
- })
-}
-
-func WithHeaders(headers map[string]string) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Headers = headers
- return cfg
- })
-}
-
-func WithTimeout(duration time.Duration) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Timeout = duration
- return cfg
- })
-}
-
-func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
- return newGenericOption(func(cfg Config) Config {
- cfg.Traces.Proxy = pf
- return cfg
- })
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go
deleted file mode 100644
index bc4db0595..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
-
-const (
- // DefaultCollectorGRPCPort is the default gRPC port of the collector.
- DefaultCollectorGRPCPort uint16 = 4317
- // DefaultCollectorHTTPPort is the default HTTP port of the collector.
- DefaultCollectorHTTPPort uint16 = 4318
- // DefaultCollectorHost is the host address the Exporter will attempt
- // connect to if no collector address is provided.
- DefaultCollectorHost string = "localhost"
-)
-
-// Compression describes the compression used for payloads sent to the
-// collector.
-type Compression int
-
-const (
- // NoCompression tells the driver to send payloads without
- // compression.
- NoCompression Compression = iota
- // GzipCompression tells the driver to send payloads after
- // compressing them with gzip.
- GzipCompression
-)
-
-// Marshaler describes the kind of message format sent to the collector.
-type Marshaler int
-
-const (
- // MarshalProto tells the driver to send using the protobuf binary format.
- MarshalProto Marshaler = iota
- // MarshalJSON tells the driver to send using json format.
- MarshalJSON
-)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go
deleted file mode 100644
index dd6f12b22..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
-
-import (
- "crypto/tls"
- "crypto/x509"
- "errors"
-)
-
-// CreateTLSConfig creates a tls.Config from a raw certificate bytes
-// to verify a server certificate.
-func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
- cp := x509.NewCertPool()
- if ok := cp.AppendCertsFromPEM(certBytes); !ok {
- return nil, errors.New("failed to append certificate to the cert pool")
- }
-
- return &tls.Config{
- RootCAs: cp,
- }, nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
deleted file mode 100644
index 9e04a9bc1..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/partialsuccess.go
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
-
-import "fmt"
-
-// PartialSuccess represents the underlying error for all handling
-// OTLP partial success messages. Use `errors.Is(err,
-// PartialSuccess{})` to test whether an error passed to the OTel
-// error handler belongs to this category.
-type PartialSuccess struct {
- ErrorMessage string
- RejectedItems int64
- RejectedKind string
-}
-
-var _ error = PartialSuccess{}
-
-// Error implements the error interface.
-func (ps PartialSuccess) Error() string {
- msg := ps.ErrorMessage
- if msg == "" {
- msg = "empty message"
- }
- return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
-}
-
-// Is supports the errors.Is() interface.
-func (ps PartialSuccess) Is(err error) bool {
- _, ok := err.(PartialSuccess)
- return ok
-}
-
-// TracePartialSuccessError returns an error describing a partial success
-// response for the trace signal.
-func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
- return PartialSuccess{
- ErrorMessage: errorMessage,
- RejectedItems: itemsRejected,
- RejectedKind: "spans",
- }
-}
-
-// MetricPartialSuccessError returns an error describing a partial success
-// response for the metric signal.
-func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
- return PartialSuccess{
- ErrorMessage: errorMessage,
- RejectedItems: itemsRejected,
- RejectedKind: "metric data points",
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
deleted file mode 100644
index 86c4819f4..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/otlp/retry/retry.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package retry provides request retry functionality that can perform
-// configurable exponential backoff for transient errors and honor any
-// explicit throttle responses received.
-package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/cenkalti/backoff/v4"
-)
-
-// DefaultConfig are the recommended defaults to use.
-var DefaultConfig = Config{
- Enabled: true,
- InitialInterval: 5 * time.Second,
- MaxInterval: 30 * time.Second,
- MaxElapsedTime: time.Minute,
-}
-
-// Config defines configuration for retrying batches in case of export failure
-// using an exponential backoff.
-type Config struct {
- // Enabled indicates whether to not retry sending batches in case of
- // export failure.
- Enabled bool
- // InitialInterval the time to wait after the first failure before
- // retrying.
- InitialInterval time.Duration
- // MaxInterval is the upper bound on backoff interval. Once this value is
- // reached the delay between consecutive retries will always be
- // `MaxInterval`.
- MaxInterval time.Duration
- // MaxElapsedTime is the maximum amount of time (including retries) spent
- // trying to send a request/batch. Once this value is reached, the data
- // is discarded.
- MaxElapsedTime time.Duration
-}
-
-// RequestFunc wraps a request with retry logic.
-type RequestFunc func(context.Context, func(context.Context) error) error
-
-// EvaluateFunc returns if an error is retry-able and if an explicit throttle
-// duration should be honored that was included in the error.
-//
-// The function must return true if the error argument is retry-able,
-// otherwise it must return false for the first return parameter.
-//
-// The function must return a non-zero time.Duration if the error contains
-// explicit throttle duration that should be honored, otherwise it must return
-// a zero valued time.Duration.
-type EvaluateFunc func(error) (bool, time.Duration)
-
-// RequestFunc returns a RequestFunc using the evaluate function to determine
-// if requests can be retried and based on the exponential backoff
-// configuration of c.
-func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
- if !c.Enabled {
- return func(ctx context.Context, fn func(context.Context) error) error {
- return fn(ctx)
- }
- }
-
- return func(ctx context.Context, fn func(context.Context) error) error {
- // Do not use NewExponentialBackOff since it calls Reset and the code here
- // must call Reset after changing the InitialInterval (this saves an
- // unnecessary call to Now).
- b := &backoff.ExponentialBackOff{
- InitialInterval: c.InitialInterval,
- RandomizationFactor: backoff.DefaultRandomizationFactor,
- Multiplier: backoff.DefaultMultiplier,
- MaxInterval: c.MaxInterval,
- MaxElapsedTime: c.MaxElapsedTime,
- Stop: backoff.Stop,
- Clock: backoff.SystemClock,
- }
- b.Reset()
-
- for {
- err := fn(ctx)
- if err == nil {
- return nil
- }
-
- retryable, throttle := evaluate(err)
- if !retryable {
- return err
- }
-
- bOff := b.NextBackOff()
- if bOff == backoff.Stop {
- return fmt.Errorf("max retry time elapsed: %w", err)
- }
-
- // Wait for the greater of the backoff or throttle delay.
- var delay time.Duration
- if bOff > throttle {
- delay = bOff
- } else {
- elapsed := b.GetElapsedTime()
- if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
- return fmt.Errorf("max retry time would elapse: %w", err)
- }
- delay = throttle
- }
-
- if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
- return fmt.Errorf("%w: %w", ctxErr, err)
- }
- }
- }
-}
-
-// Allow override for testing.
-var waitFunc = wait
-
-// wait takes the caller's context, and the amount of time to wait. It will
-// return nil if the timer fires before or at the same time as the context's
-// deadline. This indicates that the call can be retried.
-func wait(ctx context.Context, delay time.Duration) error {
- timer := time.NewTimer(delay)
- defer timer.Stop()
-
- select {
- case <-ctx.Done():
- // Handle the case where the timer and context deadline end
- // simultaneously by prioritizing the timer expiration nil value
- // response.
- select {
- case <-timer.C:
- default:
- return ctx.Err()
- }
- case <-timer.C:
- }
-
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
deleted file mode 100644
index 3559c5664..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
-
-import (
- "crypto/tls"
- "net/http"
- "net/url"
- "time"
-
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
-)
-
-// Compression describes the compression used for payloads sent to the
-// collector.
-type Compression otlpconfig.Compression
-
-// HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
-// This type is compatible with http.Transport.Proxy and can be used to set a custom proxy function
-// to the OTLP HTTP client.
-type HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
-
-const (
- // NoCompression tells the driver to send payloads without
- // compression.
- NoCompression = Compression(otlpconfig.NoCompression)
- // GzipCompression tells the driver to send payloads after
- // compressing them with gzip.
- GzipCompression = Compression(otlpconfig.GzipCompression)
-)
-
-// Option applies an option to the HTTP client.
-type Option interface {
- applyHTTPOption(otlpconfig.Config) otlpconfig.Config
-}
-
-func asHTTPOptions(opts []Option) []otlpconfig.HTTPOption {
- converted := make([]otlpconfig.HTTPOption, len(opts))
- for i, o := range opts {
- converted[i] = otlpconfig.NewHTTPOption(o.applyHTTPOption)
- }
- return converted
-}
-
-// RetryConfig defines configuration for retrying batches in case of export
-// failure using an exponential backoff.
-type RetryConfig retry.Config
-
-type wrappedOption struct {
- otlpconfig.HTTPOption
-}
-
-func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config {
- return w.ApplyHTTPOption(cfg)
-}
-
-// WithEndpoint sets the target endpoint (host and port) the Exporter will
-// connect to. The provided endpoint should resemble "example.com:4318" (no
-// scheme or path).
-//
-// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
-// environment variable is set, and this option is not passed, that variable
-// value will be used. If both environment variables are set,
-// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
-// variable is set, and this option is passed, this option will take precedence.
-// Note, both environment variables include the full
-// scheme and path, while WithEndpoint sets only the host and port.
-//
-// If both this option and WithEndpointURL are used, the last used option will
-// take precedence.
-//
-// By default, if an environment variable is not set, and this option is not
-// passed, "localhost:4318" will be used.
-//
-// This option has no effect if WithGRPCConn is used.
-func WithEndpoint(endpoint string) Option {
- return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
-}
-
-// WithEndpointURL sets the target endpoint URL (scheme, host, port, path) the
-// Exporter will connect to.
-//
-// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
-// environment variable is set, and this option is not passed, that variable
-// value will be used. If both environment variables are set,
-// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
-// variable is set, and this option is passed, this option will take precedence.
-//
-// If both this option and WithEndpoint are used, the last used option will
-// take precedence.
-//
-// If an invalid URL is provided, the default value will be kept.
-//
-// By default, if an environment variable is not set, and this option is not
-// passed, "localhost:4318" will be used.
-//
-// This option has no effect if WithGRPCConn is used.
-func WithEndpointURL(u string) Option {
- return wrappedOption{otlpconfig.WithEndpointURL(u)}
-}
-
-// WithCompression tells the driver to compress the sent data.
-func WithCompression(compression Compression) Option {
- return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))}
-}
-
-// WithURLPath allows one to override the default URL path used
-// for sending traces. If unset, default ("/v1/traces") will be used.
-func WithURLPath(urlPath string) Option {
- return wrappedOption{otlpconfig.WithURLPath(urlPath)}
-}
-
-// WithTLSClientConfig can be used to set up a custom TLS
-// configuration for the client used to send payloads to the
-// collector. Use it if you want to use a custom certificate.
-func WithTLSClientConfig(tlsCfg *tls.Config) Option {
- return wrappedOption{otlpconfig.WithTLSClientConfig(tlsCfg)}
-}
-
-// WithInsecure tells the driver to connect to the collector using the
-// HTTP scheme, instead of HTTPS.
-func WithInsecure() Option {
- return wrappedOption{otlpconfig.WithInsecure()}
-}
-
-// WithHeaders allows one to tell the driver to send additional HTTP
-// headers with the payloads. Specifying headers like Content-Length,
-// Content-Encoding and Content-Type may result in a broken driver.
-func WithHeaders(headers map[string]string) Option {
- return wrappedOption{otlpconfig.WithHeaders(headers)}
-}
-
-// WithTimeout tells the driver the max waiting time for the backend to process
-// each spans batch. If unset, the default will be 10 seconds.
-func WithTimeout(duration time.Duration) Option {
- return wrappedOption{otlpconfig.WithTimeout(duration)}
-}
-
-// WithRetry configures the retry policy for transient errors that may occurs
-// when exporting traces. An exponential back-off algorithm is used to ensure
-// endpoints are not overwhelmed with retries. If unset, the default retry
-// policy will retry after 5 seconds and increase exponentially after each
-// error for a total of 1 minute.
-func WithRetry(rc RetryConfig) Option {
- return wrappedOption{otlpconfig.WithRetry(retry.Config(rc))}
-}
-
-// WithProxy sets the Proxy function the client will use to determine the
-// proxy to use for an HTTP request. If this option is not used, the client
-// will use [http.ProxyFromEnvironment].
-func WithProxy(pf HTTPTransportProxyFunc) Option {
- return wrappedOption{otlpconfig.WithProxy(otlpconfig.HTTPTransportProxyFunc(pf))}
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
deleted file mode 100644
index f156ee667..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
-
-// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
-func Version() string {
- return "1.34.0"
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/README.md b/vendor/go.opentelemetry.io/otel/exporters/prometheus/README.md
deleted file mode 100644
index f4dc09d38..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Prometheus Exporter
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/prometheus)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/prometheus)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
deleted file mode 100644
index 660675dd6..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
-
-import (
- "strings"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/common/model"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/metric"
-)
-
-// config contains options for the exporter.
-type config struct {
- registerer prometheus.Registerer
- disableTargetInfo bool
- withoutUnits bool
- withoutCounterSuffixes bool
- readerOpts []metric.ManualReaderOption
- disableScopeInfo bool
- namespace string
- resourceAttributesFilter attribute.Filter
-}
-
-// newConfig creates a validated config configured with options.
-func newConfig(opts ...Option) config {
- cfg := config{}
- for _, opt := range opts {
- cfg = opt.apply(cfg)
- }
-
- if cfg.registerer == nil {
- cfg.registerer = prometheus.DefaultRegisterer
- }
-
- return cfg
-}
-
-// Option sets exporter option values.
-type Option interface {
- apply(config) config
-}
-
-type optionFunc func(config) config
-
-func (fn optionFunc) apply(cfg config) config {
- return fn(cfg)
-}
-
-// WithRegisterer configures which prometheus Registerer the Exporter will
-// register with. If no registerer is used the prometheus DefaultRegisterer is
-// used.
-func WithRegisterer(reg prometheus.Registerer) Option {
- return optionFunc(func(cfg config) config {
- cfg.registerer = reg
- return cfg
- })
-}
-
-// WithAggregationSelector configure the Aggregation Selector the exporter will
-// use. If no AggregationSelector is provided the DefaultAggregationSelector is
-// used.
-func WithAggregationSelector(agg metric.AggregationSelector) Option {
- return optionFunc(func(cfg config) config {
- cfg.readerOpts = append(cfg.readerOpts, metric.WithAggregationSelector(agg))
- return cfg
- })
-}
-
-// WithProducer configure the metric Producer the exporter will use as a source
-// of external metric data.
-func WithProducer(producer metric.Producer) Option {
- return optionFunc(func(cfg config) config {
- cfg.readerOpts = append(cfg.readerOpts, metric.WithProducer(producer))
- return cfg
- })
-}
-
-// WithoutTargetInfo configures the Exporter to not export the resource target_info metric.
-// If not specified, the Exporter will create a target_info metric containing
-// the metrics' resource.Resource attributes.
-func WithoutTargetInfo() Option {
- return optionFunc(func(cfg config) config {
- cfg.disableTargetInfo = true
- return cfg
- })
-}
-
-// WithoutUnits disables exporter's addition of unit suffixes to metric names,
-// and will also prevent unit comments from being added in OpenMetrics once
-// unit comments are supported.
-//
-// By default, metric names include a unit suffix to follow Prometheus naming
-// conventions. For example, the counter metric request.duration, with unit
-// milliseconds would become request_duration_milliseconds_total.
-// With this option set, the name would instead be request_duration_total.
-func WithoutUnits() Option {
- return optionFunc(func(cfg config) config {
- cfg.withoutUnits = true
- return cfg
- })
-}
-
-// WithoutCounterSuffixes disables exporter's addition _total suffixes on counters.
-//
-// By default, metric names include a _total suffix to follow Prometheus naming
-// conventions. For example, the counter metric happy.people would become
-// happy_people_total. With this option set, the name would instead be
-// happy_people.
-func WithoutCounterSuffixes() Option {
- return optionFunc(func(cfg config) config {
- cfg.withoutCounterSuffixes = true
- return cfg
- })
-}
-
-// WithoutScopeInfo configures the Exporter to not export the otel_scope_info metric.
-// If not specified, the Exporter will create a otel_scope_info metric containing
-// the metrics' Instrumentation Scope, and also add labels about Instrumentation Scope to all metric points.
-func WithoutScopeInfo() Option {
- return optionFunc(func(cfg config) config {
- cfg.disableScopeInfo = true
- return cfg
- })
-}
-
-// WithNamespace configures the Exporter to prefix metric with the given namespace.
-// Metadata metrics such as target_info and otel_scope_info are not prefixed since these
-// have special behavior based on their name.
-func WithNamespace(ns string) Option {
- return optionFunc(func(cfg config) config {
- if model.NameValidationScheme != model.UTF8Validation {
- // Only sanitize if prometheus does not support UTF-8.
- ns = model.EscapeName(ns, model.NameEscapingScheme)
- }
- if !strings.HasSuffix(ns, "_") {
- // namespace and metric names should be separated with an underscore,
- // adds a trailing underscore if there is not one already.
- ns = ns + "_"
- }
-
- cfg.namespace = ns
- return cfg
- })
-}
-
-// WithResourceAsConstantLabels configures the Exporter to add the resource attributes the
-// resourceFilter returns true for as attributes on all exported metrics.
-//
-// The does not affect the target info generated from resource attributes.
-func WithResourceAsConstantLabels(resourceFilter attribute.Filter) Option {
- return optionFunc(func(cfg config) config {
- cfg.resourceAttributesFilter = resourceFilter
- return cfg
- })
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/doc.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/doc.go
deleted file mode 100644
index e9b77869e..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package prometheus provides a Prometheus Exporter that converts
-// OTLP metrics into the Prometheus exposition format and implements
-// prometheus.Collector to provide a handler for these metrics.
-package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
deleted file mode 100644
index 50c95a16f..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
+++ /dev/null
@@ -1,554 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
-
-import (
- "context"
- "encoding/hex"
- "errors"
- "fmt"
- "slices"
- "strings"
- "sync"
-
- "github.com/prometheus/client_golang/prometheus"
- dto "github.com/prometheus/client_model/go"
- "github.com/prometheus/common/model"
- "google.golang.org/protobuf/proto"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/sdk/instrumentation"
- "go.opentelemetry.io/otel/sdk/metric"
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
- "go.opentelemetry.io/otel/sdk/resource"
-)
-
-const (
- targetInfoMetricName = "target_info"
- targetInfoDescription = "Target metadata"
-
- scopeInfoMetricName = "otel_scope_info"
- scopeInfoDescription = "Instrumentation Scope metadata"
-
- scopeNameLabel = "otel_scope_name"
- scopeVersionLabel = "otel_scope_version"
-
- traceIDExemplarKey = "trace_id"
- spanIDExemplarKey = "span_id"
-)
-
-var errScopeInvalid = errors.New("invalid scope")
-
-// Exporter is a Prometheus Exporter that embeds the OTel metric.Reader
-// interface for easy instantiation with a MeterProvider.
-type Exporter struct {
- metric.Reader
-}
-
-// MarshalLog returns logging data about the Exporter.
-func (e *Exporter) MarshalLog() interface{} {
- const t = "Prometheus exporter"
-
- if r, ok := e.Reader.(*metric.ManualReader); ok {
- under := r.MarshalLog()
- if data, ok := under.(struct {
- Type string
- Registered bool
- Shutdown bool
- }); ok {
- data.Type = t
- return data
- }
- }
-
- return struct{ Type string }{Type: t}
-}
-
-var _ metric.Reader = &Exporter{}
-
-// keyVals is used to store resource attribute key value pairs.
-type keyVals struct {
- keys []string
- vals []string
-}
-
-// collector is used to implement prometheus.Collector.
-type collector struct {
- reader metric.Reader
-
- withoutUnits bool
- withoutCounterSuffixes bool
- disableScopeInfo bool
- namespace string
- resourceAttributesFilter attribute.Filter
-
- mu sync.Mutex // mu protects all members below from the concurrent access.
- disableTargetInfo bool
- targetInfo prometheus.Metric
- scopeInfos map[instrumentation.Scope]prometheus.Metric
- scopeInfosInvalid map[instrumentation.Scope]struct{}
- metricFamilies map[string]*dto.MetricFamily
- resourceKeyVals keyVals
-}
-
-// prometheus counters MUST have a _total suffix by default:
-// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/compatibility/prometheus_and_openmetrics.md
-const counterSuffix = "_total"
-
-// New returns a Prometheus Exporter.
-func New(opts ...Option) (*Exporter, error) {
- cfg := newConfig(opts...)
-
- // this assumes that the default temporality selector will always return cumulative.
- // we only support cumulative temporality, so building our own reader enforces this.
- // TODO (#3244): Enable some way to configure the reader, but not change temporality.
- reader := metric.NewManualReader(cfg.readerOpts...)
-
- collector := &collector{
- reader: reader,
- disableTargetInfo: cfg.disableTargetInfo,
- withoutUnits: cfg.withoutUnits,
- withoutCounterSuffixes: cfg.withoutCounterSuffixes,
- disableScopeInfo: cfg.disableScopeInfo,
- scopeInfos: make(map[instrumentation.Scope]prometheus.Metric),
- scopeInfosInvalid: make(map[instrumentation.Scope]struct{}),
- metricFamilies: make(map[string]*dto.MetricFamily),
- namespace: cfg.namespace,
- resourceAttributesFilter: cfg.resourceAttributesFilter,
- }
-
- if err := cfg.registerer.Register(collector); err != nil {
- return nil, fmt.Errorf("cannot register the collector: %w", err)
- }
-
- e := &Exporter{
- Reader: reader,
- }
-
- return e, nil
-}
-
-// Describe implements prometheus.Collector.
-func (c *collector) Describe(ch chan<- *prometheus.Desc) {
- // The Opentelemetry SDK doesn't have information on which will exist when the collector
- // is registered. By returning nothing we are an "unchecked" collector in Prometheus,
- // and assume responsibility for consistency of the metrics produced.
- //
- // See https://pkg.go.dev/github.com/prometheus/client_golang@v1.13.0/prometheus#hdr-Custom_Collectors_and_constant_Metrics
-}
-
-// Collect implements prometheus.Collector.
-//
-// This method is safe to call concurrently.
-func (c *collector) Collect(ch chan<- prometheus.Metric) {
- // TODO (#3047): Use a sync.Pool instead of allocating metrics every Collect.
- metrics := metricdata.ResourceMetrics{}
- err := c.reader.Collect(context.TODO(), &metrics)
- if err != nil {
- if errors.Is(err, metric.ErrReaderShutdown) {
- return
- }
- otel.Handle(err)
- if errors.Is(err, metric.ErrReaderNotRegistered) {
- return
- }
- }
-
- global.Debug("Prometheus exporter export", "Data", metrics)
-
- // Initialize (once) targetInfo and disableTargetInfo.
- func() {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.targetInfo == nil && !c.disableTargetInfo {
- targetInfo, err := createInfoMetric(targetInfoMetricName, targetInfoDescription, metrics.Resource)
- if err != nil {
- // If the target info metric is invalid, disable sending it.
- c.disableTargetInfo = true
- otel.Handle(err)
- return
- }
-
- c.targetInfo = targetInfo
- }
- }()
-
- if !c.disableTargetInfo {
- ch <- c.targetInfo
- }
-
- if c.resourceAttributesFilter != nil && len(c.resourceKeyVals.keys) == 0 {
- c.createResourceAttributes(metrics.Resource)
- }
-
- for _, scopeMetrics := range metrics.ScopeMetrics {
- n := len(c.resourceKeyVals.keys) + 2 // resource attrs + scope name + scope version
- kv := keyVals{
- keys: make([]string, 0, n),
- vals: make([]string, 0, n),
- }
-
- if !c.disableScopeInfo {
- scopeInfo, err := c.scopeInfo(scopeMetrics.Scope)
- if errors.Is(err, errScopeInvalid) {
- // Do not report the same error multiple times.
- continue
- }
- if err != nil {
- otel.Handle(err)
- continue
- }
-
- ch <- scopeInfo
-
- kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel)
- kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version)
- }
-
- kv.keys = append(kv.keys, c.resourceKeyVals.keys...)
- kv.vals = append(kv.vals, c.resourceKeyVals.vals...)
-
- for _, m := range scopeMetrics.Metrics {
- typ := c.metricType(m)
- if typ == nil {
- continue
- }
- name := c.getName(m, typ)
-
- drop, help := c.validateMetrics(name, m.Description, typ)
- if drop {
- continue
- }
-
- if help != "" {
- m.Description = help
- }
-
- switch v := m.Data.(type) {
- case metricdata.Histogram[int64]:
- addHistogramMetric(ch, v, m, name, kv)
- case metricdata.Histogram[float64]:
- addHistogramMetric(ch, v, m, name, kv)
- case metricdata.Sum[int64]:
- addSumMetric(ch, v, m, name, kv)
- case metricdata.Sum[float64]:
- addSumMetric(ch, v, m, name, kv)
- case metricdata.Gauge[int64]:
- addGaugeMetric(ch, v, m, name, kv)
- case metricdata.Gauge[float64]:
- addGaugeMetric(ch, v, m, name, kv)
- }
- }
- }
-}
-
-func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, name string, kv keyVals) {
- for _, dp := range histogram.DataPoints {
- keys, values := getAttrs(dp.Attributes)
- keys = append(keys, kv.keys...)
- values = append(values, kv.vals...)
-
- desc := prometheus.NewDesc(name, m.Description, keys, nil)
- buckets := make(map[float64]uint64, len(dp.Bounds))
-
- cumulativeCount := uint64(0)
- for i, bound := range dp.Bounds {
- cumulativeCount += dp.BucketCounts[i]
- buckets[bound] = cumulativeCount
- }
- m, err := prometheus.NewConstHistogram(desc, dp.Count, float64(dp.Sum), buckets, values...)
- if err != nil {
- otel.Handle(err)
- continue
- }
- m = addExemplars(m, dp.Exemplars)
- ch <- m
- }
-}
-
-func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, name string, kv keyVals) {
- valueType := prometheus.CounterValue
- if !sum.IsMonotonic {
- valueType = prometheus.GaugeValue
- }
-
- for _, dp := range sum.DataPoints {
- keys, values := getAttrs(dp.Attributes)
- keys = append(keys, kv.keys...)
- values = append(values, kv.vals...)
-
- desc := prometheus.NewDesc(name, m.Description, keys, nil)
- m, err := prometheus.NewConstMetric(desc, valueType, float64(dp.Value), values...)
- if err != nil {
- otel.Handle(err)
- continue
- }
- // GaugeValues don't support Exemplars at this time
- // https://github.com/prometheus/client_golang/blob/aef8aedb4b6e1fb8ac1c90790645169125594096/prometheus/metric.go#L199
- if valueType != prometheus.GaugeValue {
- m = addExemplars(m, dp.Exemplars)
- }
- ch <- m
- }
-}
-
-func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, name string, kv keyVals) {
- for _, dp := range gauge.DataPoints {
- keys, values := getAttrs(dp.Attributes)
- keys = append(keys, kv.keys...)
- values = append(values, kv.vals...)
-
- desc := prometheus.NewDesc(name, m.Description, keys, nil)
- m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(dp.Value), values...)
- if err != nil {
- otel.Handle(err)
- continue
- }
- ch <- m
- }
-}
-
-// getAttrs converts the attribute.Set to two lists of matching Prometheus-style
-// keys and values.
-func getAttrs(attrs attribute.Set) ([]string, []string) {
- keys := make([]string, 0, attrs.Len())
- values := make([]string, 0, attrs.Len())
- itr := attrs.Iter()
-
- if model.NameValidationScheme == model.UTF8Validation {
- // Do not perform sanitization if prometheus supports UTF-8.
- for itr.Next() {
- kv := itr.Attribute()
- keys = append(keys, string(kv.Key))
- values = append(values, kv.Value.Emit())
- }
- } else {
- // It sanitizes invalid characters and handles duplicate keys
- // (due to sanitization) by sorting and concatenating the values following the spec.
- keysMap := make(map[string][]string)
- for itr.Next() {
- kv := itr.Attribute()
- key := model.EscapeName(string(kv.Key), model.NameEscapingScheme)
- if _, ok := keysMap[key]; !ok {
- keysMap[key] = []string{kv.Value.Emit()}
- } else {
- // if the sanitized key is a duplicate, append to the list of keys
- keysMap[key] = append(keysMap[key], kv.Value.Emit())
- }
- }
- for key, vals := range keysMap {
- keys = append(keys, key)
- slices.Sort(vals)
- values = append(values, strings.Join(vals, ";"))
- }
- }
- return keys, values
-}
-
-func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) {
- keys, values := getAttrs(*res.Set())
- desc := prometheus.NewDesc(name, description, keys, nil)
- return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
-}
-
-func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) {
- attrs := make([]attribute.KeyValue, 0, scope.Attributes.Len()+2) // resource attrs + scope name + scope version
- attrs = append(attrs, scope.Attributes.ToSlice()...)
- attrs = append(attrs, attribute.String(scopeNameLabel, scope.Name))
- attrs = append(attrs, attribute.String(scopeVersionLabel, scope.Version))
-
- keys, values := getAttrs(attribute.NewSet(attrs...))
- desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil)
- return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
-}
-
-var unitSuffixes = map[string]string{
- // Time
- "d": "_days",
- "h": "_hours",
- "min": "_minutes",
- "s": "_seconds",
- "ms": "_milliseconds",
- "us": "_microseconds",
- "ns": "_nanoseconds",
-
- // Bytes
- "By": "_bytes",
- "KiBy": "_kibibytes",
- "MiBy": "_mebibytes",
- "GiBy": "_gibibytes",
- "TiBy": "_tibibytes",
- "KBy": "_kilobytes",
- "MBy": "_megabytes",
- "GBy": "_gigabytes",
- "TBy": "_terabytes",
-
- // SI
- "m": "_meters",
- "V": "_volts",
- "A": "_amperes",
- "J": "_joules",
- "W": "_watts",
- "g": "_grams",
-
- // Misc
- "Cel": "_celsius",
- "Hz": "_hertz",
- "1": "_ratio",
- "%": "_percent",
-}
-
-// getName returns the sanitized name, prefixed with the namespace and suffixed with unit.
-func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string {
- name := m.Name
- if model.NameValidationScheme != model.UTF8Validation {
- // Only sanitize if prometheus does not support UTF-8.
- name = model.EscapeName(name, model.NameEscapingScheme)
- }
- addCounterSuffix := !c.withoutCounterSuffixes && *typ == dto.MetricType_COUNTER
- if addCounterSuffix {
- // Remove the _total suffix here, as we will re-add the total suffix
- // later, and it needs to come after the unit suffix.
- name = strings.TrimSuffix(name, counterSuffix)
- }
- if c.namespace != "" {
- name = c.namespace + name
- }
- if suffix, ok := unitSuffixes[m.Unit]; ok && !c.withoutUnits && !strings.HasSuffix(name, suffix) {
- name += suffix
- }
- if addCounterSuffix {
- name += counterSuffix
- }
- return name
-}
-
-func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType {
- switch v := m.Data.(type) {
- case metricdata.Histogram[int64], metricdata.Histogram[float64]:
- return dto.MetricType_HISTOGRAM.Enum()
- case metricdata.Sum[float64]:
- if v.IsMonotonic {
- return dto.MetricType_COUNTER.Enum()
- }
- return dto.MetricType_GAUGE.Enum()
- case metricdata.Sum[int64]:
- if v.IsMonotonic {
- return dto.MetricType_COUNTER.Enum()
- }
- return dto.MetricType_GAUGE.Enum()
- case metricdata.Gauge[int64], metricdata.Gauge[float64]:
- return dto.MetricType_GAUGE.Enum()
- }
- return nil
-}
-
-func (c *collector) createResourceAttributes(res *resource.Resource) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter)
- resourceKeys, resourceValues := getAttrs(resourceAttrs)
- c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues}
-}
-
-func (c *collector) scopeInfo(scope instrumentation.Scope) (prometheus.Metric, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- scopeInfo, ok := c.scopeInfos[scope]
- if ok {
- return scopeInfo, nil
- }
-
- if _, ok := c.scopeInfosInvalid[scope]; ok {
- return nil, errScopeInvalid
- }
-
- scopeInfo, err := createScopeInfoMetric(scope)
- if err != nil {
- c.scopeInfosInvalid[scope] = struct{}{}
- return nil, fmt.Errorf("cannot create scope info metric: %w", err)
- }
-
- c.scopeInfos[scope] = scopeInfo
-
- return scopeInfo, nil
-}
-
-func (c *collector) validateMetrics(name, description string, metricType *dto.MetricType) (drop bool, help string) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- emf, exist := c.metricFamilies[name]
-
- if !exist {
- c.metricFamilies[name] = &dto.MetricFamily{
- Name: proto.String(name),
- Help: proto.String(description),
- Type: metricType,
- }
- return false, ""
- }
-
- if emf.GetType() != *metricType {
- global.Error(
- errors.New("instrument type conflict"),
- "Using existing type definition.",
- "instrument", name,
- "existing", emf.GetType(),
- "dropped", *metricType,
- )
- return true, ""
- }
- if emf.GetHelp() != description {
- global.Info(
- "Instrument description conflict, using existing",
- "instrument", name,
- "existing", emf.GetHelp(),
- "dropped", description,
- )
- return false, emf.GetHelp()
- }
-
- return false, ""
-}
-
-func addExemplars[N int64 | float64](m prometheus.Metric, exemplars []metricdata.Exemplar[N]) prometheus.Metric {
- if len(exemplars) == 0 {
- return m
- }
- promExemplars := make([]prometheus.Exemplar, len(exemplars))
- for i, exemplar := range exemplars {
- labels := attributesToLabels(exemplar.FilteredAttributes)
- // Overwrite any existing trace ID or span ID attributes
- labels[traceIDExemplarKey] = hex.EncodeToString(exemplar.TraceID[:])
- labels[spanIDExemplarKey] = hex.EncodeToString(exemplar.SpanID[:])
- promExemplars[i] = prometheus.Exemplar{
- Value: float64(exemplar.Value),
- Timestamp: exemplar.Time,
- Labels: labels,
- }
- }
- metricWithExemplar, err := prometheus.NewMetricWithExemplars(m, promExemplars...)
- if err != nil {
- // If there are errors creating the metric with exemplars, just warn
- // and return the metric without exemplars.
- otel.Handle(err)
- return m
- }
- return metricWithExemplar
-}
-
-func attributesToLabels(attrs []attribute.KeyValue) prometheus.Labels {
- labels := make(map[string]string)
- for _, attr := range attrs {
- key := model.EscapeName(string(attr.Key), model.NameEscapingScheme)
- labels[key] = attr.Value.Emit()
- }
- return labels
-}
diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
deleted file mode 100644
index 93e80ea30..000000000
--- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright The OpenTelemetry Authors
-# SPDX-License-Identifier: Apache-2.0
-
-set -euo pipefail
-
-top_dir='.'
-if [[ $# -gt 0 ]]; then
- top_dir="${1}"
-fi
-
-p=$(pwd)
-mod_dirs=()
-
-# Note `mapfile` does not exist in older bash versions:
-# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash
-
-while IFS= read -r line; do
- mod_dirs+=("$line")
-done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort)
-
-for mod_dir in "${mod_dirs[@]}"; do
- cd "${mod_dir}"
-
- while IFS= read -r line; do
- echo ".${line#${p}}"
- done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|')
- cd "${p}"
-done
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
deleted file mode 100644
index 07623b679..000000000
--- a/vendor/go.opentelemetry.io/otel/handler.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otel // import "go.opentelemetry.io/otel"
-
-import (
- "go.opentelemetry.io/otel/internal/global"
-)
-
-// Compile-time check global.ErrDelegator implements ErrorHandler.
-var _ ErrorHandler = (*global.ErrDelegator)(nil)
-
-// GetErrorHandler returns the global ErrorHandler instance.
-//
-// The default ErrorHandler instance returned will log all errors to STDERR
-// until an override ErrorHandler is set with SetErrorHandler. All
-// ErrorHandler returned prior to this will automatically forward errors to
-// the set instance instead of logging.
-//
-// Subsequent calls to SetErrorHandler after the first will not forward errors
-// to the new ErrorHandler for prior returned instances.
-func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
-
-// SetErrorHandler sets the global ErrorHandler to h.
-//
-// The first time this is called all ErrorHandler previously returned from
-// GetErrorHandler will send errors to h instead of the default logging
-// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
-// delegate errors to h.
-func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
-
-// Handle is a convenience function for GetErrorHandler().Handle(err).
-func Handle(err error) { global.GetErrorHandler().Handle(err) }
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
deleted file mode 100644
index 691d96c75..000000000
--- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package attribute provide several helper functions for some commonly used
-logic of processing attributes.
-*/
-package attribute // import "go.opentelemetry.io/otel/internal/attribute"
-
-import (
- "reflect"
-)
-
-// BoolSliceValue converts a bool slice into an array with same elements as slice.
-func BoolSliceValue(v []bool) interface{} {
- var zero bool
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
- reflect.Copy(cp, reflect.ValueOf(v))
- return cp.Interface()
-}
-
-// Int64SliceValue converts an int64 slice into an array with same elements as slice.
-func Int64SliceValue(v []int64) interface{} {
- var zero int64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
- reflect.Copy(cp, reflect.ValueOf(v))
- return cp.Interface()
-}
-
-// Float64SliceValue converts a float64 slice into an array with same elements as slice.
-func Float64SliceValue(v []float64) interface{} {
- var zero float64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
- reflect.Copy(cp, reflect.ValueOf(v))
- return cp.Interface()
-}
-
-// StringSliceValue converts a string slice into an array with same elements as slice.
-func StringSliceValue(v []string) interface{} {
- var zero string
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
- reflect.Copy(cp, reflect.ValueOf(v))
- return cp.Interface()
-}
-
-// AsBoolSlice converts a bool array into a slice into with same elements as array.
-func AsBoolSlice(v interface{}) []bool {
- rv := reflect.ValueOf(v)
- if rv.Type().Kind() != reflect.Array {
- return nil
- }
- cpy := make([]bool, rv.Len())
- if len(cpy) > 0 {
- _ = reflect.Copy(reflect.ValueOf(cpy), rv)
- }
- return cpy
-}
-
-// AsInt64Slice converts an int64 array into a slice into with same elements as array.
-func AsInt64Slice(v interface{}) []int64 {
- rv := reflect.ValueOf(v)
- if rv.Type().Kind() != reflect.Array {
- return nil
- }
- cpy := make([]int64, rv.Len())
- if len(cpy) > 0 {
- _ = reflect.Copy(reflect.ValueOf(cpy), rv)
- }
- return cpy
-}
-
-// AsFloat64Slice converts a float64 array into a slice into with same elements as array.
-func AsFloat64Slice(v interface{}) []float64 {
- rv := reflect.ValueOf(v)
- if rv.Type().Kind() != reflect.Array {
- return nil
- }
- cpy := make([]float64, rv.Len())
- if len(cpy) > 0 {
- _ = reflect.Copy(reflect.ValueOf(cpy), rv)
- }
- return cpy
-}
-
-// AsStringSlice converts a string array into a slice into with same elements as array.
-func AsStringSlice(v interface{}) []string {
- rv := reflect.ValueOf(v)
- if rv.Type().Kind() != reflect.Array {
- return nil
- }
- cpy := make([]string, rv.Len())
- if len(cpy) > 0 {
- _ = reflect.Copy(reflect.ValueOf(cpy), rv)
- }
- return cpy
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
deleted file mode 100644
index b4f85f44a..000000000
--- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package baggage provides base types and functionality to store and retrieve
-baggage in Go context. This package exists because the OpenTracing bridge to
-OpenTelemetry needs to synchronize state whenever baggage for a context is
-modified and that context contains an OpenTracing span. If it were not for
-this need this package would not need to exist and the
-`go.opentelemetry.io/otel/baggage` package would be the singular place where
-W3C baggage is handled.
-*/
-package baggage // import "go.opentelemetry.io/otel/internal/baggage"
-
-// List is the collection of baggage members. The W3C allows for duplicates,
-// but OpenTelemetry does not, therefore, this is represented as a map.
-type List map[string]Item
-
-// Item is the value and metadata properties part of a list-member.
-type Item struct {
- Value string
- Properties []Property
-}
-
-// Property is a metadata entry for a list-member.
-type Property struct {
- Key, Value string
-
- // HasValue indicates if a zero-value value means the property does not
- // have a value or if it was the zero-value.
- HasValue bool
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
deleted file mode 100644
index 3aea9c491..000000000
--- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package baggage // import "go.opentelemetry.io/otel/internal/baggage"
-
-import "context"
-
-type baggageContextKeyType int
-
-const baggageKey baggageContextKeyType = iota
-
-// SetHookFunc is a callback called when storing baggage in the context.
-type SetHookFunc func(context.Context, List) context.Context
-
-// GetHookFunc is a callback called when getting baggage from the context.
-type GetHookFunc func(context.Context, List) List
-
-type baggageState struct {
- list List
-
- setHook SetHookFunc
- getHook GetHookFunc
-}
-
-// ContextWithSetHook returns a copy of parent with hook configured to be
-// invoked every time ContextWithBaggage is called.
-//
-// Passing nil SetHookFunc creates a context with no set hook to call.
-func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context {
- var s baggageState
- if v, ok := parent.Value(baggageKey).(baggageState); ok {
- s = v
- }
-
- s.setHook = hook
- return context.WithValue(parent, baggageKey, s)
-}
-
-// ContextWithGetHook returns a copy of parent with hook configured to be
-// invoked every time FromContext is called.
-//
-// Passing nil GetHookFunc creates a context with no get hook to call.
-func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context {
- var s baggageState
- if v, ok := parent.Value(baggageKey).(baggageState); ok {
- s = v
- }
-
- s.getHook = hook
- return context.WithValue(parent, baggageKey, s)
-}
-
-// ContextWithList returns a copy of parent with baggage. Passing nil list
-// returns a context without any baggage.
-func ContextWithList(parent context.Context, list List) context.Context {
- var s baggageState
- if v, ok := parent.Value(baggageKey).(baggageState); ok {
- s = v
- }
-
- s.list = list
- ctx := context.WithValue(parent, baggageKey, s)
- if s.setHook != nil {
- ctx = s.setHook(ctx, list)
- }
-
- return ctx
-}
-
-// ListFromContext returns the baggage contained in ctx.
-func ListFromContext(ctx context.Context) List {
- switch v := ctx.Value(baggageKey).(type) {
- case baggageState:
- if v.getHook != nil {
- return v.getHook(ctx, v.list)
- }
- return v.list
- default:
- return nil
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go
deleted file mode 100644
index 4259f0320..000000000
--- a/vendor/go.opentelemetry.io/otel/internal/gen.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/internal"
-
-//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
-//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
-//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
-
-//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
-//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
-//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
-//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
-//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go
-//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
-//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
-//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
-//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
deleted file mode 100644
index c657ff8e7..000000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-import (
- "log"
- "sync/atomic"
-)
-
-// ErrorHandler handles irremediable events.
-type ErrorHandler interface {
- // Handle handles any error deemed irremediable by an OpenTelemetry
- // component.
- Handle(error)
-}
-
-type ErrDelegator struct {
- delegate atomic.Pointer[ErrorHandler]
-}
-
-// Compile-time check that delegator implements ErrorHandler.
-var _ ErrorHandler = (*ErrDelegator)(nil)
-
-func (d *ErrDelegator) Handle(err error) {
- if eh := d.delegate.Load(); eh != nil {
- (*eh).Handle(err)
- return
- }
- log.Print(err)
-}
-
-// setDelegate sets the ErrorHandler delegate.
-func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
- d.delegate.Store(&eh)
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
deleted file mode 100644
index ae92a4251..000000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
+++ /dev/null
@@ -1,412 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-import (
- "context"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// unwrapper unwraps to return the underlying instrument implementation.
-type unwrapper interface {
- unwrap() metric.Observable
-}
-
-type afCounter struct {
- embedded.Float64ObservableCounter
- metric.Float64Observable
-
- name string
- opts []metric.Float64ObservableCounterOption
-
- delegate atomic.Value // metric.Float64ObservableCounter
-}
-
-var (
- _ unwrapper = (*afCounter)(nil)
- _ metric.Float64ObservableCounter = (*afCounter)(nil)
-)
-
-func (i *afCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *afCounter) unwrap() metric.Observable {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(metric.Float64ObservableCounter)
- }
- return nil
-}
-
-type afUpDownCounter struct {
- embedded.Float64ObservableUpDownCounter
- metric.Float64Observable
-
- name string
- opts []metric.Float64ObservableUpDownCounterOption
-
- delegate atomic.Value // metric.Float64ObservableUpDownCounter
-}
-
-var (
- _ unwrapper = (*afUpDownCounter)(nil)
- _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
-)
-
-func (i *afUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *afUpDownCounter) unwrap() metric.Observable {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(metric.Float64ObservableUpDownCounter)
- }
- return nil
-}
-
-type afGauge struct {
- embedded.Float64ObservableGauge
- metric.Float64Observable
-
- name string
- opts []metric.Float64ObservableGaugeOption
-
- delegate atomic.Value // metric.Float64ObservableGauge
-}
-
-var (
- _ unwrapper = (*afGauge)(nil)
- _ metric.Float64ObservableGauge = (*afGauge)(nil)
-)
-
-func (i *afGauge) setDelegate(m metric.Meter) {
- ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *afGauge) unwrap() metric.Observable {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(metric.Float64ObservableGauge)
- }
- return nil
-}
-
-type aiCounter struct {
- embedded.Int64ObservableCounter
- metric.Int64Observable
-
- name string
- opts []metric.Int64ObservableCounterOption
-
- delegate atomic.Value // metric.Int64ObservableCounter
-}
-
-var (
- _ unwrapper = (*aiCounter)(nil)
- _ metric.Int64ObservableCounter = (*aiCounter)(nil)
-)
-
-func (i *aiCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *aiCounter) unwrap() metric.Observable {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(metric.Int64ObservableCounter)
- }
- return nil
-}
-
-type aiUpDownCounter struct {
- embedded.Int64ObservableUpDownCounter
- metric.Int64Observable
-
- name string
- opts []metric.Int64ObservableUpDownCounterOption
-
- delegate atomic.Value // metric.Int64ObservableUpDownCounter
-}
-
-var (
- _ unwrapper = (*aiUpDownCounter)(nil)
- _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
-)
-
-func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *aiUpDownCounter) unwrap() metric.Observable {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(metric.Int64ObservableUpDownCounter)
- }
- return nil
-}
-
-type aiGauge struct {
- embedded.Int64ObservableGauge
- metric.Int64Observable
-
- name string
- opts []metric.Int64ObservableGaugeOption
-
- delegate atomic.Value // metric.Int64ObservableGauge
-}
-
-var (
- _ unwrapper = (*aiGauge)(nil)
- _ metric.Int64ObservableGauge = (*aiGauge)(nil)
-)
-
-func (i *aiGauge) setDelegate(m metric.Meter) {
- ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *aiGauge) unwrap() metric.Observable {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(metric.Int64ObservableGauge)
- }
- return nil
-}
-
-// Sync Instruments.
-type sfCounter struct {
- embedded.Float64Counter
-
- name string
- opts []metric.Float64CounterOption
-
- delegate atomic.Value // metric.Float64Counter
-}
-
-var _ metric.Float64Counter = (*sfCounter)(nil)
-
-func (i *sfCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Float64Counter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Float64Counter).Add(ctx, incr, opts...)
- }
-}
-
-type sfUpDownCounter struct {
- embedded.Float64UpDownCounter
-
- name string
- opts []metric.Float64UpDownCounterOption
-
- delegate atomic.Value // metric.Float64UpDownCounter
-}
-
-var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil)
-
-func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Float64UpDownCounter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...)
- }
-}
-
-type sfHistogram struct {
- embedded.Float64Histogram
-
- name string
- opts []metric.Float64HistogramOption
-
- delegate atomic.Value // metric.Float64Histogram
-}
-
-var _ metric.Float64Histogram = (*sfHistogram)(nil)
-
-func (i *sfHistogram) setDelegate(m metric.Meter) {
- ctr, err := m.Float64Histogram(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Float64Histogram).Record(ctx, x, opts...)
- }
-}
-
-type sfGauge struct {
- embedded.Float64Gauge
-
- name string
- opts []metric.Float64GaugeOption
-
- delegate atomic.Value // metric.Float64Gauge
-}
-
-var _ metric.Float64Gauge = (*sfGauge)(nil)
-
-func (i *sfGauge) setDelegate(m metric.Meter) {
- ctr, err := m.Float64Gauge(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Float64Gauge).Record(ctx, x, opts...)
- }
-}
-
-type siCounter struct {
- embedded.Int64Counter
-
- name string
- opts []metric.Int64CounterOption
-
- delegate atomic.Value // metric.Int64Counter
-}
-
-var _ metric.Int64Counter = (*siCounter)(nil)
-
-func (i *siCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Int64Counter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Int64Counter).Add(ctx, x, opts...)
- }
-}
-
-type siUpDownCounter struct {
- embedded.Int64UpDownCounter
-
- name string
- opts []metric.Int64UpDownCounterOption
-
- delegate atomic.Value // metric.Int64UpDownCounter
-}
-
-var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
-
-func (i *siUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Int64UpDownCounter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...)
- }
-}
-
-type siHistogram struct {
- embedded.Int64Histogram
-
- name string
- opts []metric.Int64HistogramOption
-
- delegate atomic.Value // metric.Int64Histogram
-}
-
-var _ metric.Int64Histogram = (*siHistogram)(nil)
-
-func (i *siHistogram) setDelegate(m metric.Meter) {
- ctr, err := m.Int64Histogram(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
- }
-}
-
-type siGauge struct {
- embedded.Int64Gauge
-
- name string
- opts []metric.Int64GaugeOption
-
- delegate atomic.Value // metric.Int64Gauge
-}
-
-var _ metric.Int64Gauge = (*siGauge)(nil)
-
-func (i *siGauge) setDelegate(m metric.Meter) {
- ctr, err := m.Int64Gauge(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Int64Gauge).Record(ctx, x, opts...)
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
deleted file mode 100644
index adbca7d34..000000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-import (
- "log"
- "os"
- "sync/atomic"
-
- "github.com/go-logr/logr"
- "github.com/go-logr/stdr"
-)
-
-// globalLogger holds a reference to the [logr.Logger] used within
-// go.opentelemetry.io/otel.
-//
-// The default logger uses stdr which is backed by the standard `log.Logger`
-// interface. This logger will only show messages at the Error Level.
-var globalLogger = func() *atomic.Pointer[logr.Logger] {
- l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
-
- p := new(atomic.Pointer[logr.Logger])
- p.Store(&l)
- return p
-}()
-
-// SetLogger sets the global Logger to l.
-//
-// To see Warn messages use a logger with `l.V(1).Enabled() == true`
-// To see Info messages use a logger with `l.V(4).Enabled() == true`
-// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
-func SetLogger(l logr.Logger) {
- globalLogger.Store(&l)
-}
-
-// GetLogger returns the global logger.
-func GetLogger() logr.Logger {
- return *globalLogger.Load()
-}
-
-// Info prints messages about the general state of the API or SDK.
-// This should usually be less than 5 messages a minute.
-func Info(msg string, keysAndValues ...interface{}) {
- GetLogger().V(4).Info(msg, keysAndValues...)
-}
-
-// Error prints messages about exceptional states of the API or SDK.
-func Error(err error, msg string, keysAndValues ...interface{}) {
- GetLogger().Error(err, msg, keysAndValues...)
-}
-
-// Debug prints messages about all internal changes in the API or SDK.
-func Debug(msg string, keysAndValues ...interface{}) {
- GetLogger().V(8).Info(msg, keysAndValues...)
-}
-
-// Warn prints messages about warnings in the API or SDK.
-// Not an error but is likely more important than an informational event.
-func Warn(msg string, keysAndValues ...interface{}) {
- GetLogger().V(1).Info(msg, keysAndValues...)
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
deleted file mode 100644
index a6acd8dca..000000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ /dev/null
@@ -1,598 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-import (
- "container/list"
- "context"
- "reflect"
- "sync"
-
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// meterProvider is a placeholder for a configured SDK MeterProvider.
-//
-// All MeterProvider functionality is forwarded to a delegate once
-// configured.
-type meterProvider struct {
- embedded.MeterProvider
-
- mtx sync.Mutex
- meters map[il]*meter
-
- delegate metric.MeterProvider
-}
-
-// setDelegate configures p to delegate all MeterProvider functionality to
-// provider.
-//
-// All Meters provided prior to this function call are switched out to be
-// Meters provided by provider. All instruments and callbacks are recreated and
-// delegated.
-//
-// It is guaranteed by the caller that this happens only once.
-func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
- p.mtx.Lock()
- defer p.mtx.Unlock()
-
- p.delegate = provider
-
- if len(p.meters) == 0 {
- return
- }
-
- for _, meter := range p.meters {
- meter.setDelegate(provider)
- }
-
- p.meters = nil
-}
-
-// Meter implements MeterProvider.
-func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
- p.mtx.Lock()
- defer p.mtx.Unlock()
-
- if p.delegate != nil {
- return p.delegate.Meter(name, opts...)
- }
-
- // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map.
-
- c := metric.NewMeterConfig(opts...)
- key := il{
- name: name,
- version: c.InstrumentationVersion(),
- schema: c.SchemaURL(),
- attrs: c.InstrumentationAttributes(),
- }
-
- if p.meters == nil {
- p.meters = make(map[il]*meter)
- }
-
- if val, ok := p.meters[key]; ok {
- return val
- }
-
- t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)}
- p.meters[key] = t
- return t
-}
-
-// meter is a placeholder for a metric.Meter.
-//
-// All Meter functionality is forwarded to a delegate once configured.
-// Otherwise, all functionality is forwarded to a NoopMeter.
-type meter struct {
- embedded.Meter
-
- name string
- opts []metric.MeterOption
-
- mtx sync.Mutex
- instruments map[instID]delegatedInstrument
-
- registry list.List
-
- delegate metric.Meter
-}
-
-type delegatedInstrument interface {
- setDelegate(metric.Meter)
-}
-
-// instID are the identifying properties of a instrument.
-type instID struct {
- // name is the name of the stream.
- name string
- // description is the description of the stream.
- description string
- // kind defines the functional group of the instrument.
- kind reflect.Type
- // unit is the unit of the stream.
- unit string
-}
-
-// setDelegate configures m to delegate all Meter functionality to Meters
-// created by provider.
-//
-// All subsequent calls to the Meter methods will be passed to the delegate.
-//
-// It is guaranteed by the caller that this happens only once.
-func (m *meter) setDelegate(provider metric.MeterProvider) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- meter := provider.Meter(m.name, m.opts...)
- m.delegate = meter
-
- for _, inst := range m.instruments {
- inst.setDelegate(meter)
- }
-
- var n *list.Element
- for e := m.registry.Front(); e != nil; e = n {
- r := e.Value.(*registration)
- r.setDelegate(meter)
- n = e.Next()
- m.registry.Remove(e)
- }
-
- m.instruments = nil
- m.registry.Init()
-}
-
-func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Int64Counter(name, options...)
- }
-
- cfg := metric.NewInt64CounterConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*siCounter)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Int64Counter), nil
- }
- i := &siCounter{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Int64UpDownCounter(name, options...)
- }
-
- cfg := metric.NewInt64UpDownCounterConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*siUpDownCounter)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Int64UpDownCounter), nil
- }
- i := &siUpDownCounter{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Int64Histogram(name, options...)
- }
-
- cfg := metric.NewInt64HistogramConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*siHistogram)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Int64Histogram), nil
- }
- i := &siHistogram{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Int64Gauge(name, options...)
- }
-
- cfg := metric.NewInt64GaugeConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*siGauge)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Int64Gauge), nil
- }
- i := &siGauge{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Int64ObservableCounter(name, options...)
- }
-
- cfg := metric.NewInt64ObservableCounterConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*aiCounter)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Int64ObservableCounter), nil
- }
- i := &aiCounter{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Int64ObservableUpDownCounter(name, options...)
- }
-
- cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*aiUpDownCounter)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Int64ObservableUpDownCounter), nil
- }
- i := &aiUpDownCounter{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Int64ObservableGauge(name, options...)
- }
-
- cfg := metric.NewInt64ObservableGaugeConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*aiGauge)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Int64ObservableGauge), nil
- }
- i := &aiGauge{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Float64Counter(name, options...)
- }
-
- cfg := metric.NewFloat64CounterConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*sfCounter)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Float64Counter), nil
- }
- i := &sfCounter{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Float64UpDownCounter(name, options...)
- }
-
- cfg := metric.NewFloat64UpDownCounterConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*sfUpDownCounter)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Float64UpDownCounter), nil
- }
- i := &sfUpDownCounter{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Float64Histogram(name, options...)
- }
-
- cfg := metric.NewFloat64HistogramConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*sfHistogram)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Float64Histogram), nil
- }
- i := &sfHistogram{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Float64Gauge(name, options...)
- }
-
- cfg := metric.NewFloat64GaugeConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*sfGauge)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Float64Gauge), nil
- }
- i := &sfGauge{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Float64ObservableCounter(name, options...)
- }
-
- cfg := metric.NewFloat64ObservableCounterConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*afCounter)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Float64ObservableCounter), nil
- }
- i := &afCounter{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Float64ObservableUpDownCounter(name, options...)
- }
-
- cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*afUpDownCounter)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Float64ObservableUpDownCounter), nil
- }
- i := &afUpDownCounter{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.Float64ObservableGauge(name, options...)
- }
-
- cfg := metric.NewFloat64ObservableGaugeConfig(options...)
- id := instID{
- name: name,
- kind: reflect.TypeOf((*afGauge)(nil)),
- description: cfg.Description(),
- unit: cfg.Unit(),
- }
- if f, ok := m.instruments[id]; ok {
- return f.(metric.Float64ObservableGauge), nil
- }
- i := &afGauge{name: name, opts: options}
- m.instruments[id] = i
- return i, nil
-}
-
-// RegisterCallback captures the function that will be called during Collect.
-func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- if m.delegate != nil {
- return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...)
- }
-
- reg := &registration{instruments: insts, function: f}
- e := m.registry.PushBack(reg)
- reg.unreg = func() error {
- m.mtx.Lock()
- _ = m.registry.Remove(e)
- m.mtx.Unlock()
- return nil
- }
- return reg, nil
-}
-
-func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
- out := make([]metric.Observable, 0, len(instruments))
-
- for _, inst := range instruments {
- if in, ok := inst.(unwrapper); ok {
- out = append(out, in.unwrap())
- } else {
- out = append(out, inst)
- }
- }
-
- return out
-}
-
-type registration struct {
- embedded.Registration
-
- instruments []metric.Observable
- function metric.Callback
-
- unreg func() error
- unregMu sync.Mutex
-}
-
-type unwrapObs struct {
- embedded.Observer
- obs metric.Observer
-}
-
-// unwrapFloat64Observable returns an expected metric.Float64Observable after
-// unwrapping the global object.
-func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable {
- if unwrapped, ok := inst.(unwrapper); ok {
- if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok {
- // Note: if the unwrapped object does not
- // unwrap as an observable for either of the
- // predicates here, it means an internal bug in
- // this package. We avoid logging an error in
- // this case, because the SDK has to try its
- // own type conversion on the object. The SDK
- // will see this and be forced to respond with
- // its own error.
- //
- // This code uses a double-nested if statement
- // to avoid creating a branch that is
- // impossible to cover.
- inst = floatObs
- }
- }
- return inst
-}
-
-// unwrapInt64Observable returns an expected metric.Int64Observable after
-// unwrapping the global object.
-func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable {
- if unwrapped, ok := inst.(unwrapper); ok {
- if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok {
- // See the comment in unwrapFloat64Observable().
- inst = unint
- }
- }
- return inst
-}
-
-func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) {
- uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...)
-}
-
-func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) {
- uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...)
-}
-
-func unwrapCallback(f metric.Callback) metric.Callback {
- return func(ctx context.Context, obs metric.Observer) error {
- return f(ctx, &unwrapObs{obs: obs})
- }
-}
-
-func (c *registration) setDelegate(m metric.Meter) {
- c.unregMu.Lock()
- defer c.unregMu.Unlock()
-
- if c.unreg == nil {
- // Unregister already called.
- return
- }
-
- reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
-
- c.unreg = reg.Unregister
-}
-
-func (c *registration) Unregister() error {
- c.unregMu.Lock()
- defer c.unregMu.Unlock()
- if c.unreg == nil {
- // Unregister already called.
- return nil
- }
-
- var err error
- err, c.unreg = c.unreg(), nil
- return err
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
deleted file mode 100644
index 38560ff99..000000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-import (
- "context"
- "sync"
-
- "go.opentelemetry.io/otel/propagation"
-)
-
-// textMapPropagator is a default TextMapPropagator that delegates calls to a
-// registered delegate if one is set, otherwise it defaults to delegating the
-// calls to a the default no-op propagation.TextMapPropagator.
-type textMapPropagator struct {
- mtx sync.Mutex
- once sync.Once
- delegate propagation.TextMapPropagator
- noop propagation.TextMapPropagator
-}
-
-// Compile-time guarantee that textMapPropagator implements the
-// propagation.TextMapPropagator interface.
-var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
-
-func newTextMapPropagator() *textMapPropagator {
- return &textMapPropagator{
- noop: propagation.NewCompositeTextMapPropagator(),
- }
-}
-
-// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
-// forwarded to. Delegation can only be performed once, all subsequent calls
-// perform no delegation.
-func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
- if delegate == nil {
- return
- }
-
- p.mtx.Lock()
- p.once.Do(func() { p.delegate = delegate })
- p.mtx.Unlock()
-}
-
-// effectiveDelegate returns the current delegate of p if one is set,
-// otherwise the default noop TextMapPropagator is returned. This method
-// can be called concurrently.
-func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
- p.mtx.Lock()
- defer p.mtx.Unlock()
- if p.delegate != nil {
- return p.delegate
- }
- return p.noop
-}
-
-// Inject set cross-cutting concerns from the Context into the carrier.
-func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
- p.effectiveDelegate().Inject(ctx, carrier)
-}
-
-// Extract reads cross-cutting concerns from the carrier into a Context.
-func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
- return p.effectiveDelegate().Extract(ctx, carrier)
-}
-
-// Fields returns the keys whose values are set with Inject.
-func (p *textMapPropagator) Fields() []string {
- return p.effectiveDelegate().Fields()
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
deleted file mode 100644
index 204ea142a..000000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/state.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-import (
- "errors"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/propagation"
- "go.opentelemetry.io/otel/trace"
-)
-
-type (
- errorHandlerHolder struct {
- eh ErrorHandler
- }
-
- tracerProviderHolder struct {
- tp trace.TracerProvider
- }
-
- propagatorsHolder struct {
- tm propagation.TextMapPropagator
- }
-
- meterProviderHolder struct {
- mp metric.MeterProvider
- }
-)
-
-var (
- globalErrorHandler = defaultErrorHandler()
- globalTracer = defaultTracerValue()
- globalPropagators = defaultPropagatorsValue()
- globalMeterProvider = defaultMeterProvider()
-
- delegateErrorHandlerOnce sync.Once
- delegateTraceOnce sync.Once
- delegateTextMapPropagatorOnce sync.Once
- delegateMeterOnce sync.Once
-)
-
-// GetErrorHandler returns the global ErrorHandler instance.
-//
-// The default ErrorHandler instance returned will log all errors to STDERR
-// until an override ErrorHandler is set with SetErrorHandler. All
-// ErrorHandler returned prior to this will automatically forward errors to
-// the set instance instead of logging.
-//
-// Subsequent calls to SetErrorHandler after the first will not forward errors
-// to the new ErrorHandler for prior returned instances.
-func GetErrorHandler() ErrorHandler {
- return globalErrorHandler.Load().(errorHandlerHolder).eh
-}
-
-// SetErrorHandler sets the global ErrorHandler to h.
-//
-// The first time this is called all ErrorHandler previously returned from
-// GetErrorHandler will send errors to h instead of the default logging
-// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
-// delegate errors to h.
-func SetErrorHandler(h ErrorHandler) {
- current := GetErrorHandler()
-
- if _, cOk := current.(*ErrDelegator); cOk {
- if _, ehOk := h.(*ErrDelegator); ehOk && current == h {
- // Do not assign to the delegate of the default ErrDelegator to be
- // itself.
- Error(
- errors.New("no ErrorHandler delegate configured"),
- "ErrorHandler remains its current value.",
- )
- return
- }
- }
-
- delegateErrorHandlerOnce.Do(func() {
- if def, ok := current.(*ErrDelegator); ok {
- def.setDelegate(h)
- }
- })
- globalErrorHandler.Store(errorHandlerHolder{eh: h})
-}
-
-// TracerProvider is the internal implementation for global.TracerProvider.
-func TracerProvider() trace.TracerProvider {
- return globalTracer.Load().(tracerProviderHolder).tp
-}
-
-// SetTracerProvider is the internal implementation for global.SetTracerProvider.
-func SetTracerProvider(tp trace.TracerProvider) {
- current := TracerProvider()
-
- if _, cOk := current.(*tracerProvider); cOk {
- if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
- // Do not assign the default delegating TracerProvider to delegate
- // to itself.
- Error(
- errors.New("no delegate configured in tracer provider"),
- "Setting tracer provider to its current value. No delegate will be configured",
- )
- return
- }
- }
-
- delegateTraceOnce.Do(func() {
- if def, ok := current.(*tracerProvider); ok {
- def.setDelegate(tp)
- }
- })
- globalTracer.Store(tracerProviderHolder{tp: tp})
-}
-
-// TextMapPropagator is the internal implementation for global.TextMapPropagator.
-func TextMapPropagator() propagation.TextMapPropagator {
- return globalPropagators.Load().(propagatorsHolder).tm
-}
-
-// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
-func SetTextMapPropagator(p propagation.TextMapPropagator) {
- current := TextMapPropagator()
-
- if _, cOk := current.(*textMapPropagator); cOk {
- if _, pOk := p.(*textMapPropagator); pOk && current == p {
- // Do not assign the default delegating TextMapPropagator to
- // delegate to itself.
- Error(
- errors.New("no delegate configured in text map propagator"),
- "Setting text map propagator to its current value. No delegate will be configured",
- )
- return
- }
- }
-
- // For the textMapPropagator already returned by TextMapPropagator
- // delegate to p.
- delegateTextMapPropagatorOnce.Do(func() {
- if def, ok := current.(*textMapPropagator); ok {
- def.SetDelegate(p)
- }
- })
- // Return p when subsequent calls to TextMapPropagator are made.
- globalPropagators.Store(propagatorsHolder{tm: p})
-}
-
-// MeterProvider is the internal implementation for global.MeterProvider.
-func MeterProvider() metric.MeterProvider {
- return globalMeterProvider.Load().(meterProviderHolder).mp
-}
-
-// SetMeterProvider is the internal implementation for global.SetMeterProvider.
-func SetMeterProvider(mp metric.MeterProvider) {
- current := MeterProvider()
- if _, cOk := current.(*meterProvider); cOk {
- if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
- // Do not assign the default delegating MeterProvider to delegate
- // to itself.
- Error(
- errors.New("no delegate configured in meter provider"),
- "Setting meter provider to its current value. No delegate will be configured",
- )
- return
- }
- }
-
- delegateMeterOnce.Do(func() {
- if def, ok := current.(*meterProvider); ok {
- def.setDelegate(mp)
- }
- })
- globalMeterProvider.Store(meterProviderHolder{mp: mp})
-}
-
-func defaultErrorHandler() *atomic.Value {
- v := &atomic.Value{}
- v.Store(errorHandlerHolder{eh: &ErrDelegator{}})
- return v
-}
-
-func defaultTracerValue() *atomic.Value {
- v := &atomic.Value{}
- v.Store(tracerProviderHolder{tp: &tracerProvider{}})
- return v
-}
-
-func defaultPropagatorsValue() *atomic.Value {
- v := &atomic.Value{}
- v.Store(propagatorsHolder{tm: newTextMapPropagator()})
- return v
-}
-
-func defaultMeterProvider() *atomic.Value {
- v := &atomic.Value{}
- v.Store(meterProviderHolder{mp: &meterProvider{}})
- return v
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
deleted file mode 100644
index 8982aa0dc..000000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-/*
-This file contains the forwarding implementation of the TracerProvider used as
-the default global instance. Prior to initialization of an SDK, Tracers
-returned by the global TracerProvider will provide no-op functionality. This
-means that all Span created prior to initialization are no-op Spans.
-
-Once an SDK has been initialized, all provided no-op Tracers are swapped for
-Tracers provided by the SDK defined TracerProvider. However, any Span started
-prior to this initialization does not change its behavior. Meaning, the Span
-remains a no-op Span.
-
-The implementation to track and swap Tracers locks all new Tracer creation
-until the swap is complete. This assumes that this operation is not
-performance-critical. If that assumption is incorrect, be sure to configure an
-SDK prior to any Tracer creation.
-*/
-
-import (
- "context"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/auto/sdk"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace"
- "go.opentelemetry.io/otel/trace/embedded"
-)
-
-// tracerProvider is a placeholder for a configured SDK TracerProvider.
-//
-// All TracerProvider functionality is forwarded to a delegate once
-// configured.
-type tracerProvider struct {
- embedded.TracerProvider
-
- mtx sync.Mutex
- tracers map[il]*tracer
- delegate trace.TracerProvider
-}
-
-// Compile-time guarantee that tracerProvider implements the TracerProvider
-// interface.
-var _ trace.TracerProvider = &tracerProvider{}
-
-// setDelegate configures p to delegate all TracerProvider functionality to
-// provider.
-//
-// All Tracers provided prior to this function call are switched out to be
-// Tracers provided by provider.
-//
-// It is guaranteed by the caller that this happens only once.
-func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
- p.mtx.Lock()
- defer p.mtx.Unlock()
-
- p.delegate = provider
-
- if len(p.tracers) == 0 {
- return
- }
-
- for _, t := range p.tracers {
- t.setDelegate(provider)
- }
-
- p.tracers = nil
-}
-
-// Tracer implements TracerProvider.
-func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
- p.mtx.Lock()
- defer p.mtx.Unlock()
-
- if p.delegate != nil {
- return p.delegate.Tracer(name, opts...)
- }
-
- // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map.
-
- c := trace.NewTracerConfig(opts...)
- key := il{
- name: name,
- version: c.InstrumentationVersion(),
- schema: c.SchemaURL(),
- attrs: c.InstrumentationAttributes(),
- }
-
- if p.tracers == nil {
- p.tracers = make(map[il]*tracer)
- }
-
- if val, ok := p.tracers[key]; ok {
- return val
- }
-
- t := &tracer{name: name, opts: opts, provider: p}
- p.tracers[key] = t
- return t
-}
-
-type il struct {
- name string
- version string
- schema string
- attrs attribute.Set
-}
-
-// tracer is a placeholder for a trace.Tracer.
-//
-// All Tracer functionality is forwarded to a delegate once configured.
-// Otherwise, all functionality is forwarded to a NoopTracer.
-type tracer struct {
- embedded.Tracer
-
- name string
- opts []trace.TracerOption
- provider *tracerProvider
-
- delegate atomic.Value
-}
-
-// Compile-time guarantee that tracer implements the trace.Tracer interface.
-var _ trace.Tracer = &tracer{}
-
-// setDelegate configures t to delegate all Tracer functionality to Tracers
-// created by provider.
-//
-// All subsequent calls to the Tracer methods will be passed to the delegate.
-//
-// It is guaranteed by the caller that this happens only once.
-func (t *tracer) setDelegate(provider trace.TracerProvider) {
- t.delegate.Store(provider.Tracer(t.name, t.opts...))
-}
-
-// Start implements trace.Tracer by forwarding the call to t.delegate if
-// set, otherwise it forwards the call to a NoopTracer.
-func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
- delegate := t.delegate.Load()
- if delegate != nil {
- return delegate.(trace.Tracer).Start(ctx, name, opts...)
- }
-
- return t.newSpan(ctx, autoInstEnabled, name, opts)
-}
-
-// autoInstEnabled determines if the auto-instrumentation SDK span is returned
-// from the tracer when not backed by a delegate and auto-instrumentation has
-// attached to this process.
-//
-// The auto-instrumentation is expected to overwrite this value to true when it
-// attaches. By default, this will point to false and mean a tracer will return
-// a nonRecordingSpan by default.
-var autoInstEnabled = new(bool)
-
-func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) {
- // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is
- // so the auto-instrumentation can define a uprobe for (*t).newSpan and be
- // provided with the address of the bool autoInstEnabled points to. It
- // needs to be a parameter so that pointer can be reliably determined, it
- // should not be read from the global.
-
- if *autoSpan {
- tracer := sdk.TracerProvider().Tracer(t.name, t.opts...)
- return tracer.Start(ctx, name, opts...)
- }
-
- s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
- ctx = trace.ContextWithSpan(ctx, s)
- return ctx, s
-}
-
-// nonRecordingSpan is a minimal implementation of a Span that wraps a
-// SpanContext. It performs no operations other than to return the wrapped
-// SpanContext.
-type nonRecordingSpan struct {
- embedded.Span
-
- sc trace.SpanContext
- tracer *tracer
-}
-
-var _ trace.Span = nonRecordingSpan{}
-
-// SpanContext returns the wrapped SpanContext.
-func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
-
-// IsRecording always returns false.
-func (nonRecordingSpan) IsRecording() bool { return false }
-
-// SetStatus does nothing.
-func (nonRecordingSpan) SetStatus(codes.Code, string) {}
-
-// SetError does nothing.
-func (nonRecordingSpan) SetError(bool) {}
-
-// SetAttributes does nothing.
-func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
-
-// End does nothing.
-func (nonRecordingSpan) End(...trace.SpanEndOption) {}
-
-// RecordError does nothing.
-func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
-
-// AddEvent does nothing.
-func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
-
-// AddLink does nothing.
-func (nonRecordingSpan) AddLink(trace.Link) {}
-
-// SetName does nothing.
-func (nonRecordingSpan) SetName(string) {}
-
-func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
deleted file mode 100644
index b2fe3e41d..000000000
--- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/internal"
-
-import (
- "math"
- "unsafe"
-)
-
-func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag.
- if b {
- return 1
- }
- return 0
-}
-
-func RawToBool(r uint64) bool {
- return r != 0
-}
-
-func Int64ToRaw(i int64) uint64 {
- // Assumes original was a valid int64 (overflow not checked).
- return uint64(i) // nolint: gosec
-}
-
-func RawToInt64(r uint64) int64 {
- // Assumes original was a valid int64 (overflow not checked).
- return int64(r) // nolint: gosec
-}
-
-func Float64ToRaw(f float64) uint64 {
- return math.Float64bits(f)
-}
-
-func RawToFloat64(r uint64) float64 {
- return math.Float64frombits(r)
-}
-
-func RawPtrToFloat64Ptr(r *uint64) *float64 {
- // Assumes original was a valid *float64 (overflow not checked).
- return (*float64)(unsafe.Pointer(r)) // nolint: gosec
-}
-
-func RawPtrToInt64Ptr(r *uint64) *int64 {
- // Assumes original was a valid *int64 (overflow not checked).
- return (*int64)(unsafe.Pointer(r)) // nolint: gosec
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go
deleted file mode 100644
index 6de7f2e4d..000000000
--- a/vendor/go.opentelemetry.io/otel/internal_logging.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otel // import "go.opentelemetry.io/otel"
-
-import (
- "github.com/go-logr/logr"
-
- "go.opentelemetry.io/otel/internal/global"
-)
-
-// SetLogger configures the logger used internally to opentelemetry.
-func SetLogger(logger logr.Logger) {
- global.SetLogger(logger)
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
deleted file mode 100644
index 1e6473b32..000000000
--- a/vendor/go.opentelemetry.io/otel/metric.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otel // import "go.opentelemetry.io/otel"
-
-import (
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/metric"
-)
-
-// Meter returns a Meter from the global MeterProvider. The name must be the
-// name of the library providing instrumentation. This name may be the same as
-// the instrumented code only if that code provides built-in instrumentation.
-// If the name is empty, then a implementation defined default name will be
-// used instead.
-//
-// If this is called before a global MeterProvider is registered the returned
-// Meter will be a No-op implementation of a Meter. When a global MeterProvider
-// is registered for the first time, the returned Meter, and all the
-// instruments it has created or will create, are recreated automatically from
-// the new MeterProvider.
-//
-// This is short for GetMeterProvider().Meter(name).
-func Meter(name string, opts ...metric.MeterOption) metric.Meter {
- return GetMeterProvider().Meter(name, opts...)
-}
-
-// GetMeterProvider returns the registered global meter provider.
-//
-// If no global GetMeterProvider has been registered, a No-op GetMeterProvider
-// implementation is returned. When a global GetMeterProvider is registered for
-// the first time, the returned GetMeterProvider, and all the Meters it has
-// created or will create, are recreated automatically from the new
-// GetMeterProvider.
-func GetMeterProvider() metric.MeterProvider {
- return global.MeterProvider()
-}
-
-// SetMeterProvider registers mp as the global MeterProvider.
-func SetMeterProvider(mp metric.MeterProvider) {
- global.SetMeterProvider(mp)
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md
deleted file mode 100644
index 0cf902e01..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Metric API
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
deleted file mode 100644
index f8435d8f2..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// Float64Observable describes a set of instruments used asynchronously to
-// record float64 measurements once per collection cycle. Observations of
-// these instruments are only made within a callback.
-//
-// Warning: Methods may be added to this interface in minor releases.
-type Float64Observable interface {
- Observable
-
- float64Observable()
-}
-
-// Float64ObservableCounter is an instrument used to asynchronously record
-// increasing float64 measurements once per collection cycle. Observations are
-// only made within a callback for this instrument. The value observed is
-// assumed the to be the cumulative sum of the count.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for
-// unimplemented methods.
-type Float64ObservableCounter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64ObservableCounter
-
- Float64Observable
-}
-
-// Float64ObservableCounterConfig contains options for asynchronous counter
-// instruments that record float64 values.
-type Float64ObservableCounterConfig struct {
- description string
- unit string
- callbacks []Float64Callback
-}
-
-// NewFloat64ObservableCounterConfig returns a new
-// [Float64ObservableCounterConfig] with all opts applied.
-func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig {
- var config Float64ObservableCounterConfig
- for _, o := range opts {
- config = o.applyFloat64ObservableCounter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64ObservableCounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64ObservableCounterConfig) Unit() string {
- return c.unit
-}
-
-// Callbacks returns the configured callbacks.
-func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
- return c.callbacks
-}
-
-// Float64ObservableCounterOption applies options to a
-// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
-// [InstrumentOption] for other options that can be used as a
-// Float64ObservableCounterOption.
-type Float64ObservableCounterOption interface {
- applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
-}
-
-// Float64ObservableUpDownCounter is an instrument used to asynchronously
-// record float64 measurements once per collection cycle. Observations are only
-// made within a callback for this instrument. The value observed is assumed
-// the to be the cumulative sum of the count.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64ObservableUpDownCounter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64ObservableUpDownCounter
-
- Float64Observable
-}
-
-// Float64ObservableUpDownCounterConfig contains options for asynchronous
-// counter instruments that record float64 values.
-type Float64ObservableUpDownCounterConfig struct {
- description string
- unit string
- callbacks []Float64Callback
-}
-
-// NewFloat64ObservableUpDownCounterConfig returns a new
-// [Float64ObservableUpDownCounterConfig] with all opts applied.
-func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig {
- var config Float64ObservableUpDownCounterConfig
- for _, o := range opts {
- config = o.applyFloat64ObservableUpDownCounter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64ObservableUpDownCounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64ObservableUpDownCounterConfig) Unit() string {
- return c.unit
-}
-
-// Callbacks returns the configured callbacks.
-func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback {
- return c.callbacks
-}
-
-// Float64ObservableUpDownCounterOption applies options to a
-// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and
-// [InstrumentOption] for other options that can be used as a
-// Float64ObservableUpDownCounterOption.
-type Float64ObservableUpDownCounterOption interface {
- applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig
-}
-
-// Float64ObservableGauge is an instrument used to asynchronously record
-// instantaneous float64 measurements once per collection cycle. Observations
-// are only made within a callback for this instrument.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64ObservableGauge interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64ObservableGauge
-
- Float64Observable
-}
-
-// Float64ObservableGaugeConfig contains options for asynchronous counter
-// instruments that record float64 values.
-type Float64ObservableGaugeConfig struct {
- description string
- unit string
- callbacks []Float64Callback
-}
-
-// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig]
-// with all opts applied.
-func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig {
- var config Float64ObservableGaugeConfig
- for _, o := range opts {
- config = o.applyFloat64ObservableGauge(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64ObservableGaugeConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64ObservableGaugeConfig) Unit() string {
- return c.unit
-}
-
-// Callbacks returns the configured callbacks.
-func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback {
- return c.callbacks
-}
-
-// Float64ObservableGaugeOption applies options to a
-// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and
-// [InstrumentOption] for other options that can be used as a
-// Float64ObservableGaugeOption.
-type Float64ObservableGaugeOption interface {
- applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig
-}
-
-// Float64Observer is a recorder of float64 measurements.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64Observer interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64Observer
-
- // Observe records the float64 value.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Observe(value float64, options ...ObserveOption)
-}
-
-// Float64Callback is a function registered with a Meter that makes
-// observations for a Float64Observable instrument it is registered with.
-// Calls to the Float64Observer record measurement values for the
-// Float64Observable.
-//
-// The function needs to complete in a finite amount of time and the deadline
-// of the passed context is expected to be honored.
-//
-// The function needs to make unique observations across all registered
-// Float64Callbacks. Meaning, it should not report measurements with the same
-// attributes as another Float64Callbacks also registered for the same
-// instrument.
-//
-// The function needs to be concurrent safe.
-type Float64Callback func(context.Context, Float64Observer) error
-
-// Float64ObservableOption applies options to float64 Observer instruments.
-type Float64ObservableOption interface {
- Float64ObservableCounterOption
- Float64ObservableUpDownCounterOption
- Float64ObservableGaugeOption
-}
-
-type float64CallbackOpt struct {
- cback Float64Callback
-}
-
-func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig {
- cfg.callbacks = append(cfg.callbacks, o.cback)
- return cfg
-}
-
-func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
- cfg.callbacks = append(cfg.callbacks, o.cback)
- return cfg
-}
-
-func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
- cfg.callbacks = append(cfg.callbacks, o.cback)
- return cfg
-}
-
-// WithFloat64Callback adds callback to be called for an instrument.
-func WithFloat64Callback(callback Float64Callback) Float64ObservableOption {
- return float64CallbackOpt{callback}
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
deleted file mode 100644
index e079aaef1..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// Int64Observable describes a set of instruments used asynchronously to record
-// int64 measurements once per collection cycle. Observations of these
-// instruments are only made within a callback.
-//
-// Warning: Methods may be added to this interface in minor releases.
-type Int64Observable interface {
- Observable
-
- int64Observable()
-}
-
-// Int64ObservableCounter is an instrument used to asynchronously record
-// increasing int64 measurements once per collection cycle. Observations are
-// only made within a callback for this instrument. The value observed is
-// assumed the to be the cumulative sum of the count.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64ObservableCounter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64ObservableCounter
-
- Int64Observable
-}
-
-// Int64ObservableCounterConfig contains options for asynchronous counter
-// instruments that record int64 values.
-type Int64ObservableCounterConfig struct {
- description string
- unit string
- callbacks []Int64Callback
-}
-
-// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig]
-// with all opts applied.
-func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig {
- var config Int64ObservableCounterConfig
- for _, o := range opts {
- config = o.applyInt64ObservableCounter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64ObservableCounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64ObservableCounterConfig) Unit() string {
- return c.unit
-}
-
-// Callbacks returns the configured callbacks.
-func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
- return c.callbacks
-}
-
-// Int64ObservableCounterOption applies options to a
-// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
-// [InstrumentOption] for other options that can be used as an
-// Int64ObservableCounterOption.
-type Int64ObservableCounterOption interface {
- applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
-}
-
-// Int64ObservableUpDownCounter is an instrument used to asynchronously record
-// int64 measurements once per collection cycle. Observations are only made
-// within a callback for this instrument. The value observed is assumed the to
-// be the cumulative sum of the count.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64ObservableUpDownCounter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64ObservableUpDownCounter
-
- Int64Observable
-}
-
-// Int64ObservableUpDownCounterConfig contains options for asynchronous counter
-// instruments that record int64 values.
-type Int64ObservableUpDownCounterConfig struct {
- description string
- unit string
- callbacks []Int64Callback
-}
-
-// NewInt64ObservableUpDownCounterConfig returns a new
-// [Int64ObservableUpDownCounterConfig] with all opts applied.
-func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig {
- var config Int64ObservableUpDownCounterConfig
- for _, o := range opts {
- config = o.applyInt64ObservableUpDownCounter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64ObservableUpDownCounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64ObservableUpDownCounterConfig) Unit() string {
- return c.unit
-}
-
-// Callbacks returns the configured callbacks.
-func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
- return c.callbacks
-}
-
-// Int64ObservableUpDownCounterOption applies options to a
-// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
-// [InstrumentOption] for other options that can be used as an
-// Int64ObservableUpDownCounterOption.
-type Int64ObservableUpDownCounterOption interface {
- applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
-}
-
-// Int64ObservableGauge is an instrument used to asynchronously record
-// instantaneous int64 measurements once per collection cycle. Observations are
-// only made within a callback for this instrument.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64ObservableGauge interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64ObservableGauge
-
- Int64Observable
-}
-
-// Int64ObservableGaugeConfig contains options for asynchronous counter
-// instruments that record int64 values.
-type Int64ObservableGaugeConfig struct {
- description string
- unit string
- callbacks []Int64Callback
-}
-
-// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig]
-// with all opts applied.
-func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig {
- var config Int64ObservableGaugeConfig
- for _, o := range opts {
- config = o.applyInt64ObservableGauge(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64ObservableGaugeConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64ObservableGaugeConfig) Unit() string {
- return c.unit
-}
-
-// Callbacks returns the configured callbacks.
-func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
- return c.callbacks
-}
-
-// Int64ObservableGaugeOption applies options to a
-// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
-// [InstrumentOption] for other options that can be used as an
-// Int64ObservableGaugeOption.
-type Int64ObservableGaugeOption interface {
- applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
-}
-
-// Int64Observer is a recorder of int64 measurements.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64Observer interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64Observer
-
- // Observe records the int64 value.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Observe(value int64, options ...ObserveOption)
-}
-
-// Int64Callback is a function registered with a Meter that makes observations
-// for an Int64Observable instrument it is registered with. Calls to the
-// Int64Observer record measurement values for the Int64Observable.
-//
-// The function needs to complete in a finite amount of time and the deadline
-// of the passed context is expected to be honored.
-//
-// The function needs to make unique observations across all registered
-// Int64Callbacks. Meaning, it should not report measurements with the same
-// attributes as another Int64Callbacks also registered for the same
-// instrument.
-//
-// The function needs to be concurrent safe.
-type Int64Callback func(context.Context, Int64Observer) error
-
-// Int64ObservableOption applies options to int64 Observer instruments.
-type Int64ObservableOption interface {
- Int64ObservableCounterOption
- Int64ObservableUpDownCounterOption
- Int64ObservableGaugeOption
-}
-
-type int64CallbackOpt struct {
- cback Int64Callback
-}
-
-func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig {
- cfg.callbacks = append(cfg.callbacks, o.cback)
- return cfg
-}
-
-func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
- cfg.callbacks = append(cfg.callbacks, o.cback)
- return cfg
-}
-
-func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
- cfg.callbacks = append(cfg.callbacks, o.cback)
- return cfg
-}
-
-// WithInt64Callback adds callback to be called for an instrument.
-func WithInt64Callback(callback Int64Callback) Int64ObservableOption {
- return int64CallbackOpt{callback}
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
deleted file mode 100644
index d9e3b13e4..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/config.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// MeterConfig contains options for Meters.
-type MeterConfig struct {
- instrumentationVersion string
- schemaURL string
- attrs attribute.Set
-
- // Ensure forward compatibility by explicitly making this not comparable.
- noCmp [0]func() //nolint: unused // This is indeed used.
-}
-
-// InstrumentationVersion returns the version of the library providing
-// instrumentation.
-func (cfg MeterConfig) InstrumentationVersion() string {
- return cfg.instrumentationVersion
-}
-
-// InstrumentationAttributes returns the attributes associated with the library
-// providing instrumentation.
-func (cfg MeterConfig) InstrumentationAttributes() attribute.Set {
- return cfg.attrs
-}
-
-// SchemaURL is the schema_url of the library providing instrumentation.
-func (cfg MeterConfig) SchemaURL() string {
- return cfg.schemaURL
-}
-
-// MeterOption is an interface for applying Meter options.
-type MeterOption interface {
- // applyMeter is used to set a MeterOption value of a MeterConfig.
- applyMeter(MeterConfig) MeterConfig
-}
-
-// NewMeterConfig creates a new MeterConfig and applies
-// all the given options.
-func NewMeterConfig(opts ...MeterOption) MeterConfig {
- var config MeterConfig
- for _, o := range opts {
- config = o.applyMeter(config)
- }
- return config
-}
-
-type meterOptionFunc func(MeterConfig) MeterConfig
-
-func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig {
- return fn(cfg)
-}
-
-// WithInstrumentationVersion sets the instrumentation version.
-func WithInstrumentationVersion(version string) MeterOption {
- return meterOptionFunc(func(config MeterConfig) MeterConfig {
- config.instrumentationVersion = version
- return config
- })
-}
-
-// WithInstrumentationAttributes sets the instrumentation attributes.
-//
-// The passed attributes will be de-duplicated.
-func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
- return meterOptionFunc(func(config MeterConfig) MeterConfig {
- config.attrs = attribute.NewSet(attr...)
- return config
- })
-}
-
-// WithSchemaURL sets the schema URL.
-func WithSchemaURL(schemaURL string) MeterOption {
- return meterOptionFunc(func(config MeterConfig) MeterConfig {
- config.schemaURL = schemaURL
- return config
- })
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go
deleted file mode 100644
index f153745b0..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/doc.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package metric provides the OpenTelemetry API used to measure metrics about
-source code operation.
-
-This API is separate from its implementation so the instrumentation built from
-it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official
-OpenTelemetry implementation of this API.
-
-All measurements made with this package are made via instruments. These
-instruments are created by a [Meter] which itself is created by a
-[MeterProvider]. Applications need to accept a [MeterProvider] implementation
-as a starting point when instrumenting. This can be done directly, or by using
-the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an
-appropriately named [Meter] from the accepted [MeterProvider], instrumentation
-can then be built from the [Meter]'s instruments.
-
-# Instruments
-
-Each instrument is designed to make measurements of a particular type. Broadly,
-all instruments fall into two overlapping logical categories: asynchronous or
-synchronous, and int64 or float64.
-
-All synchronous instruments ([Int64Counter], [Int64UpDownCounter],
-[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
-[Float64Histogram]) are used to measure the operation and performance of source
-code during the source code execution. These instruments only make measurements
-when the source code they instrument is run.
-
-All asynchronous instruments ([Int64ObservableCounter],
-[Int64ObservableUpDownCounter], [Int64ObservableGauge],
-[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
-[Float64ObservableGauge]) are used to measure metrics outside of the execution
-of source code. They are said to make "observations" via a callback function
-called once every measurement collection cycle.
-
-Each instrument is also grouped by the value type it measures. Either int64 or
-float64. The value being measured will dictate which instrument in these
-categories to use.
-
-Outside of these two broad categories, instruments are described by the
-function they are designed to serve. All Counters ([Int64Counter],
-[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are
-designed to measure values that never decrease in value, but instead only
-incrementally increase in value. UpDownCounters ([Int64UpDownCounter],
-[Float64UpDownCounter], [Int64ObservableUpDownCounter], and
-[Float64ObservableUpDownCounter]) on the other hand, are designed to measure
-values that can increase and decrease. When more information needs to be
-conveyed about all the synchronous measurements made during a collection cycle,
-a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally,
-when just the most recent measurement needs to be conveyed about an
-asynchronous measurement, a Gauge ([Int64ObservableGauge] and
-[Float64ObservableGauge]) should be used.
-
-See the [OpenTelemetry documentation] for more information about instruments
-and their intended use.
-
-# Instrument Name
-
-OpenTelemetry defines an [instrument name syntax] that restricts what
-instrument names are allowed.
-
-Instrument names should ...
-
- - Not be empty.
- - Have an alphabetic character as their first letter.
- - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’,
- ‘-’, or ‘/’.
- - Have a maximum length of 255 letters.
-
-To ensure compatibility with observability platforms, all instruments created
-need to conform to this syntax. Not all implementations of the API will validate
-these names, it is the callers responsibility to ensure compliance.
-
-# Measurements
-
-Measurements are made by recording values and information about the values with
-an instrument. How these measurements are recorded depends on the instrument.
-
-Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter],
-[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
-[Float64Histogram]) are recorded using the instrument methods directly. All
-counter instruments have an Add method that is used to measure an increment
-value, and all histogram instruments have a Record method to measure a data
-point.
-
-Asynchronous instruments ([Int64ObservableCounter],
-[Int64ObservableUpDownCounter], [Int64ObservableGauge],
-[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
-[Float64ObservableGauge]) record measurements within a callback function. The
-callback is registered with the Meter which ensures the callback is called once
-per collection cycle. A callback can be registered two ways: during the
-instrument's creation using an option, or later using the RegisterCallback
-method of the [Meter] that created the instrument.
-
-If the following criteria are met, an option ([WithInt64Callback] or
-[WithFloat64Callback]) can be used during the asynchronous instrument's
-creation to register a callback ([Int64Callback] or [Float64Callback],
-respectively):
-
- - The measurement process is known when the instrument is created
- - Only that instrument will make a measurement within the callback
- - The callback never needs to be unregistered
-
-If the criteria are not met, use the RegisterCallback method of the [Meter] that
-created the instrument to register a [Callback].
-
-# API Implementations
-
-This package does not conform to the standard Go versioning policy, all of its
-interfaces may have methods added to them without a package major version bump.
-This non-standard API evolution could surprise an uninformed implementation
-author. They could unknowingly build their implementation in a way that would
-result in a runtime panic for their users that update to the new API.
-
-The API is designed to help inform an instrumentation author about this
-non-standard API evolution. It requires them to choose a default behavior for
-unimplemented interface methods. There are three behavior choices they can
-make:
-
- - Compilation failure
- - Panic
- - Default to another implementation
-
-All interfaces in this API embed a corresponding interface from
-[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
-behavior of their implementations to be a compilation failure, signaling to
-their users they need to update to the latest version of that implementation,
-they need to embed the corresponding interface from
-[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
-example,
-
- import "go.opentelemetry.io/otel/metric/embedded"
-
- type MeterProvider struct {
- embedded.MeterProvider
- // ...
- }
-
-If an author wants the default behavior of their implementations to a panic,
-they need to embed the API interface directly.
-
- import "go.opentelemetry.io/otel/metric"
-
- type MeterProvider struct {
- metric.MeterProvider
- // ...
- }
-
-This is not a recommended behavior as it could lead to publishing packages that
-contain runtime panics when users update other package that use newer versions
-of [go.opentelemetry.io/otel/metric].
-
-Finally, an author can embed another implementation in theirs. The embedded
-implementation will be used for methods not defined by the author. For example,
-an author who wants to default to silently dropping the call can use
-[go.opentelemetry.io/otel/metric/noop]:
-
- import "go.opentelemetry.io/otel/metric/noop"
-
- type MeterProvider struct {
- noop.MeterProvider
- // ...
- }
-
-It is strongly recommended that authors only embed
-[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
-That implementation is the only one OpenTelemetry authors can guarantee will
-fully implement all the API interfaces when a user updates their API.
-
-[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax
-[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
-[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
-*/
-package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
deleted file mode 100644
index 1f6e0efa7..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Metric Embedded
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
deleted file mode 100644
index 1a9dc6809..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
+++ /dev/null
@@ -1,243 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package embedded provides interfaces embedded within the [OpenTelemetry
-// metric API].
-//
-// Implementers of the [OpenTelemetry metric API] can embed the relevant type
-// from this package into their implementation directly. Doing so will result
-// in a compilation error for users when the [OpenTelemetry metric API] is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-//
-// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric
-package embedded // import "go.opentelemetry.io/otel/metric/embedded"
-
-// MeterProvider is embedded in
-// [go.opentelemetry.io/otel/metric.MeterProvider].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to
-// experience a compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type MeterProvider interface{ meterProvider() }
-
-// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a
-// compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface
-// is extended (which is something that can happen without a major version bump
-// of the API package).
-type Meter interface{ meter() }
-
-// Float64Observer is embedded in
-// [go.opentelemetry.io/otel/metric.Float64Observer].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64Observer] if you want
-// users to experience a compilation error, signaling they need to update to
-// your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64Observer] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Float64Observer interface{ float64Observer() }
-
-// Int64Observer is embedded in
-// [go.opentelemetry.io/otel/metric.Int64Observer].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users
-// to experience a compilation error, signaling they need to update to your
-// latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64Observer] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Int64Observer interface{ int64Observer() }
-
-// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a
-// compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/metric.Observer]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Observer interface{ observer() }
-
-// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Registration] if you want users to
-// experience a compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/metric.Registration]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Registration interface{ registration() }
-
-// Float64Counter is embedded in
-// [go.opentelemetry.io/otel/metric.Float64Counter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64Counter] if you want
-// users to experience a compilation error, signaling they need to update to
-// your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64Counter] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Float64Counter interface{ float64Counter() }
-
-// Float64Histogram is embedded in
-// [go.opentelemetry.io/otel/metric.Float64Histogram].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want
-// users to experience a compilation error, signaling they need to update to
-// your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Float64Histogram interface{ float64Histogram() }
-
-// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to
-// experience a compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Float64Gauge interface{ float64Gauge() }
-
-// Float64ObservableCounter is embedded in
-// [go.opentelemetry.io/otel/metric.Float64ObservableCounter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you
-// want users to experience a compilation error, signaling they need to update
-// to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Float64ObservableCounter interface{ float64ObservableCounter() }
-
-// Float64ObservableGauge is embedded in
-// [go.opentelemetry.io/otel/metric.Float64ObservableGauge].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you
-// want users to experience a compilation error, signaling they need to update
-// to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Float64ObservableGauge interface{ float64ObservableGauge() }
-
-// Float64ObservableUpDownCounter is embedded in
-// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]
-// if you want users to experience a compilation error, signaling they need to
-// update to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() }
-
-// Float64UpDownCounter is embedded in
-// [go.opentelemetry.io/otel/metric.Float64UpDownCounter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you
-// want users to experience a compilation error, signaling they need to update
-// to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface
-// is extended (which is something that can happen without a major version bump
-// of the API package).
-type Float64UpDownCounter interface{ float64UpDownCounter() }
-
-// Int64Counter is embedded in
-// [go.opentelemetry.io/otel/metric.Int64Counter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users
-// to experience a compilation error, signaling they need to update to your
-// latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64Counter] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Int64Counter interface{ int64Counter() }
-
-// Int64Histogram is embedded in
-// [go.opentelemetry.io/otel/metric.Int64Histogram].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want
-// users to experience a compilation error, signaling they need to update to
-// your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Int64Histogram interface{ int64Histogram() }
-
-// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience
-// a compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Int64Gauge interface{ int64Gauge() }
-
-// Int64ObservableCounter is embedded in
-// [go.opentelemetry.io/otel/metric.Int64ObservableCounter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you
-// want users to experience a compilation error, signaling they need to update
-// to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Int64ObservableCounter interface{ int64ObservableCounter() }
-
-// Int64ObservableGauge is embedded in
-// [go.opentelemetry.io/otel/metric.Int64ObservableGauge].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you
-// want users to experience a compilation error, signaling they need to update
-// to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface
-// is extended (which is something that can happen without a major version bump
-// of the API package).
-type Int64ObservableGauge interface{ int64ObservableGauge() }
-
-// Int64ObservableUpDownCounter is embedded in
-// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if
-// you want users to experience a compilation error, signaling they need to
-// update to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() }
-
-// Int64UpDownCounter is embedded in
-// [go.opentelemetry.io/otel/metric.Int64UpDownCounter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want
-// users to experience a compilation error, signaling they need to update to
-// your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Int64UpDownCounter interface{ int64UpDownCounter() }
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go
deleted file mode 100644
index a535782e1..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/instrument.go
+++ /dev/null
@@ -1,368 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Observable is used as a grouping mechanism for all instruments that are
-// updated within a Callback.
-type Observable interface {
- observable()
-}
-
-// InstrumentOption applies options to all instruments.
-type InstrumentOption interface {
- Int64CounterOption
- Int64UpDownCounterOption
- Int64HistogramOption
- Int64GaugeOption
- Int64ObservableCounterOption
- Int64ObservableUpDownCounterOption
- Int64ObservableGaugeOption
-
- Float64CounterOption
- Float64UpDownCounterOption
- Float64HistogramOption
- Float64GaugeOption
- Float64ObservableCounterOption
- Float64ObservableUpDownCounterOption
- Float64ObservableGaugeOption
-}
-
-// HistogramOption applies options to histogram instruments.
-type HistogramOption interface {
- Int64HistogramOption
- Float64HistogramOption
-}
-
-type descOpt string
-
-func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
- c.description = string(o)
- return c
-}
-
-// WithDescription sets the instrument description.
-func WithDescription(desc string) InstrumentOption { return descOpt(desc) }
-
-type unitOpt string
-
-func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
- c.unit = string(o)
- return c
-}
-
-// WithUnit sets the instrument unit.
-//
-// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
-func WithUnit(u string) InstrumentOption { return unitOpt(u) }
-
-// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
-//
-// This option is considered "advisory", and may be ignored by API implementations.
-func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
-
-type bucketOpt []float64
-
-func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
- c.explicitBucketBoundaries = o
- return c
-}
-
-func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
- c.explicitBucketBoundaries = o
- return c
-}
-
-// AddOption applies options to an addition measurement. See
-// [MeasurementOption] for other options that can be used as an AddOption.
-type AddOption interface {
- applyAdd(AddConfig) AddConfig
-}
-
-// AddConfig contains options for an addition measurement.
-type AddConfig struct {
- attrs attribute.Set
-}
-
-// NewAddConfig returns a new [AddConfig] with all opts applied.
-func NewAddConfig(opts []AddOption) AddConfig {
- config := AddConfig{attrs: *attribute.EmptySet()}
- for _, o := range opts {
- config = o.applyAdd(config)
- }
- return config
-}
-
-// Attributes returns the configured attribute set.
-func (c AddConfig) Attributes() attribute.Set {
- return c.attrs
-}
-
-// RecordOption applies options to an addition measurement. See
-// [MeasurementOption] for other options that can be used as a RecordOption.
-type RecordOption interface {
- applyRecord(RecordConfig) RecordConfig
-}
-
-// RecordConfig contains options for a recorded measurement.
-type RecordConfig struct {
- attrs attribute.Set
-}
-
-// NewRecordConfig returns a new [RecordConfig] with all opts applied.
-func NewRecordConfig(opts []RecordOption) RecordConfig {
- config := RecordConfig{attrs: *attribute.EmptySet()}
- for _, o := range opts {
- config = o.applyRecord(config)
- }
- return config
-}
-
-// Attributes returns the configured attribute set.
-func (c RecordConfig) Attributes() attribute.Set {
- return c.attrs
-}
-
-// ObserveOption applies options to an addition measurement. See
-// [MeasurementOption] for other options that can be used as a ObserveOption.
-type ObserveOption interface {
- applyObserve(ObserveConfig) ObserveConfig
-}
-
-// ObserveConfig contains options for an observed measurement.
-type ObserveConfig struct {
- attrs attribute.Set
-}
-
-// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
-func NewObserveConfig(opts []ObserveOption) ObserveConfig {
- config := ObserveConfig{attrs: *attribute.EmptySet()}
- for _, o := range opts {
- config = o.applyObserve(config)
- }
- return config
-}
-
-// Attributes returns the configured attribute set.
-func (c ObserveConfig) Attributes() attribute.Set {
- return c.attrs
-}
-
-// MeasurementOption applies options to all instrument measurement.
-type MeasurementOption interface {
- AddOption
- RecordOption
- ObserveOption
-}
-
-type attrOpt struct {
- set attribute.Set
-}
-
-// mergeSets returns the union of keys between a and b. Any duplicate keys will
-// use the value associated with b.
-func mergeSets(a, b attribute.Set) attribute.Set {
- // NewMergeIterator uses the first value for any duplicates.
- iter := attribute.NewMergeIterator(&b, &a)
- merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
- for iter.Next() {
- merged = append(merged, iter.Attribute())
- }
- return attribute.NewSet(merged...)
-}
-
-func (o attrOpt) applyAdd(c AddConfig) AddConfig {
- switch {
- case o.set.Len() == 0:
- case c.attrs.Len() == 0:
- c.attrs = o.set
- default:
- c.attrs = mergeSets(c.attrs, o.set)
- }
- return c
-}
-
-func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
- switch {
- case o.set.Len() == 0:
- case c.attrs.Len() == 0:
- c.attrs = o.set
- default:
- c.attrs = mergeSets(c.attrs, o.set)
- }
- return c
-}
-
-func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
- switch {
- case o.set.Len() == 0:
- case c.attrs.Len() == 0:
- c.attrs = o.set
- default:
- c.attrs = mergeSets(c.attrs, o.set)
- }
- return c
-}
-
-// WithAttributeSet sets the attribute Set associated with a measurement is
-// made with.
-//
-// If multiple WithAttributeSet or WithAttributes options are passed the
-// attributes will be merged together in the order they are passed. Attributes
-// with duplicate keys will use the last value passed.
-func WithAttributeSet(attributes attribute.Set) MeasurementOption {
- return attrOpt{set: attributes}
-}
-
-// WithAttributes converts attributes into an attribute Set and sets the Set to
-// be associated with a measurement. This is shorthand for:
-//
-// cp := make([]attribute.KeyValue, len(attributes))
-// copy(cp, attributes)
-// WithAttributeSet(attribute.NewSet(cp...))
-//
-// [attribute.NewSet] may modify the passed attributes so this will make a copy
-// of attributes before creating a set in order to ensure this function is
-// concurrent safe. This makes this option function less optimized in
-// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be
-// preferred for performance sensitive code.
-//
-// See [WithAttributeSet] for information about how multiple WithAttributes are
-// merged.
-func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption {
- cp := make([]attribute.KeyValue, len(attributes))
- copy(cp, attributes)
- return attrOpt{set: attribute.NewSet(cp...)}
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
deleted file mode 100644
index 14e08c24a..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// MeterProvider provides access to named Meter instances, for instrumenting
-// an application or package.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type MeterProvider interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.MeterProvider
-
- // Meter returns a new Meter with the provided name and configuration.
- //
- // A Meter should be scoped at most to a single package. The name needs to
- // be unique so it does not collide with other names used by
- // an application, nor other applications. To achieve this, the import path
- // of the instrumentation package is recommended to be used as name.
- //
- // If the name is empty, then an implementation defined default name will
- // be used instead.
- Meter(name string, opts ...MeterOption) Meter
-}
-
-// Meter provides access to instrument instances for recording metrics.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Meter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Meter
-
- // Int64Counter returns a new Int64Counter instrument identified by name
- // and configured with options. The instrument is used to synchronously
- // record increasing int64 measurements during a computational operation.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
-
- // Int64UpDownCounter returns a new Int64UpDownCounter instrument
- // identified by name and configured with options. The instrument is used
- // to synchronously record int64 measurements during a computational
- // operation.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
-
- // Int64Histogram returns a new Int64Histogram instrument identified by
- // name and configured with options. The instrument is used to
- // synchronously record the distribution of int64 measurements during a
- // computational operation.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
-
- // Int64Gauge returns a new Int64Gauge instrument identified by name and
- // configured with options. The instrument is used to synchronously record
- // instantaneous int64 measurements during a computational operation.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
-
- // Int64ObservableCounter returns a new Int64ObservableCounter identified
- // by name and configured with options. The instrument is used to
- // asynchronously record increasing int64 measurements once per a
- // measurement collection cycle.
- //
- // Measurements for the returned instrument are made via a callback. Use
- // the WithInt64Callback option to register the callback here, or use the
- // RegisterCallback method of this Meter to register one later. See the
- // Measurements section of the package documentation for more information.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
-
- // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
- // instrument identified by name and configured with options. The
- // instrument is used to asynchronously record int64 measurements once per
- // a measurement collection cycle.
- //
- // Measurements for the returned instrument are made via a callback. Use
- // the WithInt64Callback option to register the callback here, or use the
- // RegisterCallback method of this Meter to register one later. See the
- // Measurements section of the package documentation for more information.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
-
- // Int64ObservableGauge returns a new Int64ObservableGauge instrument
- // identified by name and configured with options. The instrument is used
- // to asynchronously record instantaneous int64 measurements once per a
- // measurement collection cycle.
- //
- // Measurements for the returned instrument are made via a callback. Use
- // the WithInt64Callback option to register the callback here, or use the
- // RegisterCallback method of this Meter to register one later. See the
- // Measurements section of the package documentation for more information.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
-
- // Float64Counter returns a new Float64Counter instrument identified by
- // name and configured with options. The instrument is used to
- // synchronously record increasing float64 measurements during a
- // computational operation.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
-
- // Float64UpDownCounter returns a new Float64UpDownCounter instrument
- // identified by name and configured with options. The instrument is used
- // to synchronously record float64 measurements during a computational
- // operation.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
-
- // Float64Histogram returns a new Float64Histogram instrument identified by
- // name and configured with options. The instrument is used to
- // synchronously record the distribution of float64 measurements during a
- // computational operation.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
-
- // Float64Gauge returns a new Float64Gauge instrument identified by name and
- // configured with options. The instrument is used to synchronously record
- // instantaneous float64 measurements during a computational operation.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
-
- // Float64ObservableCounter returns a new Float64ObservableCounter
- // instrument identified by name and configured with options. The
- // instrument is used to asynchronously record increasing float64
- // measurements once per a measurement collection cycle.
- //
- // Measurements for the returned instrument are made via a callback. Use
- // the WithFloat64Callback option to register the callback here, or use the
- // RegisterCallback method of this Meter to register one later. See the
- // Measurements section of the package documentation for more information.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
-
- // Float64ObservableUpDownCounter returns a new
- // Float64ObservableUpDownCounter instrument identified by name and
- // configured with options. The instrument is used to asynchronously record
- // float64 measurements once per a measurement collection cycle.
- //
- // Measurements for the returned instrument are made via a callback. Use
- // the WithFloat64Callback option to register the callback here, or use the
- // RegisterCallback method of this Meter to register one later. See the
- // Measurements section of the package documentation for more information.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
-
- // Float64ObservableGauge returns a new Float64ObservableGauge instrument
- // identified by name and configured with options. The instrument is used
- // to asynchronously record instantaneous float64 measurements once per a
- // measurement collection cycle.
- //
- // Measurements for the returned instrument are made via a callback. Use
- // the WithFloat64Callback option to register the callback here, or use the
- // RegisterCallback method of this Meter to register one later. See the
- // Measurements section of the package documentation for more information.
- //
- // The name needs to conform to the OpenTelemetry instrument name syntax.
- // See the Instrument Name section of the package documentation for more
- // information.
- Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
-
- // RegisterCallback registers f to be called during the collection of a
- // measurement cycle.
- //
- // If Unregister of the returned Registration is called, f needs to be
- // unregistered and not called during collection.
- //
- // The instruments f is registered with are the only instruments that f may
- // observe values for.
- //
- // If no instruments are passed, f should not be registered nor called
- // during collection.
- //
- // The function f needs to be concurrent safe.
- RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
-}
-
-// Callback is a function registered with a Meter that makes observations for
-// the set of instruments it is registered with. The Observer parameter is used
-// to record measurement observations for these instruments.
-//
-// The function needs to complete in a finite amount of time and the deadline
-// of the passed context is expected to be honored.
-//
-// The function needs to make unique observations across all registered
-// Callbacks. Meaning, it should not report measurements for an instrument with
-// the same attributes as another Callback will report.
-//
-// The function needs to be concurrent safe.
-type Callback func(context.Context, Observer) error
-
-// Observer records measurements for multiple instruments in a Callback.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Observer interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Observer
-
- // ObserveFloat64 records the float64 value for obsrv.
- ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
-
- // ObserveInt64 records the int64 value for obsrv.
- ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
-}
-
-// Registration is an token representing the unique registration of a callback
-// for a set of instruments with a Meter.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Registration interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Registration
-
- // Unregister removes the callback registration from a Meter.
- //
- // This method needs to be idempotent and concurrent safe.
- Unregister() error
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
deleted file mode 100644
index bb8969435..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/noop/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Metric Noop
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop)
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
deleted file mode 100644
index ca6fcbdc0..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package noop provides an implementation of the OpenTelemetry metric API that
-// produces no telemetry and minimizes used computation resources.
-//
-// Using this package to implement the OpenTelemetry metric API will
-// effectively disable OpenTelemetry.
-//
-// This implementation can be embedded in other implementations of the
-// OpenTelemetry metric API. Doing so will mean the implementation defaults to
-// no operation for methods it does not implement.
-package noop // import "go.opentelemetry.io/otel/metric/noop"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-var (
- // Compile-time check this implements the OpenTelemetry API.
-
- _ metric.MeterProvider = MeterProvider{}
- _ metric.Meter = Meter{}
- _ metric.Observer = Observer{}
- _ metric.Registration = Registration{}
- _ metric.Int64Counter = Int64Counter{}
- _ metric.Float64Counter = Float64Counter{}
- _ metric.Int64UpDownCounter = Int64UpDownCounter{}
- _ metric.Float64UpDownCounter = Float64UpDownCounter{}
- _ metric.Int64Histogram = Int64Histogram{}
- _ metric.Float64Histogram = Float64Histogram{}
- _ metric.Int64Gauge = Int64Gauge{}
- _ metric.Float64Gauge = Float64Gauge{}
- _ metric.Int64ObservableCounter = Int64ObservableCounter{}
- _ metric.Float64ObservableCounter = Float64ObservableCounter{}
- _ metric.Int64ObservableGauge = Int64ObservableGauge{}
- _ metric.Float64ObservableGauge = Float64ObservableGauge{}
- _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{}
- _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
- _ metric.Int64Observer = Int64Observer{}
- _ metric.Float64Observer = Float64Observer{}
-)
-
-// MeterProvider is an OpenTelemetry No-Op MeterProvider.
-type MeterProvider struct{ embedded.MeterProvider }
-
-// NewMeterProvider returns a MeterProvider that does not record any telemetry.
-func NewMeterProvider() MeterProvider {
- return MeterProvider{}
-}
-
-// Meter returns an OpenTelemetry Meter that does not record any telemetry.
-func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
- return Meter{}
-}
-
-// Meter is an OpenTelemetry No-Op Meter.
-type Meter struct{ embedded.Meter }
-
-// Int64Counter returns a Counter used to record int64 measurements that
-// produces no telemetry.
-func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
- return Int64Counter{}, nil
-}
-
-// Int64UpDownCounter returns an UpDownCounter used to record int64
-// measurements that produces no telemetry.
-func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
- return Int64UpDownCounter{}, nil
-}
-
-// Int64Histogram returns a Histogram used to record int64 measurements that
-// produces no telemetry.
-func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
- return Int64Histogram{}, nil
-}
-
-// Int64Gauge returns a Gauge used to record int64 measurements that
-// produces no telemetry.
-func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
- return Int64Gauge{}, nil
-}
-
-// Int64ObservableCounter returns an ObservableCounter used to record int64
-// measurements that produces no telemetry.
-func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
- return Int64ObservableCounter{}, nil
-}
-
-// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
-// record int64 measurements that produces no telemetry.
-func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
- return Int64ObservableUpDownCounter{}, nil
-}
-
-// Int64ObservableGauge returns an ObservableGauge used to record int64
-// measurements that produces no telemetry.
-func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
- return Int64ObservableGauge{}, nil
-}
-
-// Float64Counter returns a Counter used to record int64 measurements that
-// produces no telemetry.
-func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
- return Float64Counter{}, nil
-}
-
-// Float64UpDownCounter returns an UpDownCounter used to record int64
-// measurements that produces no telemetry.
-func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
- return Float64UpDownCounter{}, nil
-}
-
-// Float64Histogram returns a Histogram used to record int64 measurements that
-// produces no telemetry.
-func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
- return Float64Histogram{}, nil
-}
-
-// Float64Gauge returns a Gauge used to record float64 measurements that
-// produces no telemetry.
-func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
- return Float64Gauge{}, nil
-}
-
-// Float64ObservableCounter returns an ObservableCounter used to record int64
-// measurements that produces no telemetry.
-func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
- return Float64ObservableCounter{}, nil
-}
-
-// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
-// record int64 measurements that produces no telemetry.
-func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
- return Float64ObservableUpDownCounter{}, nil
-}
-
-// Float64ObservableGauge returns an ObservableGauge used to record int64
-// measurements that produces no telemetry.
-func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
- return Float64ObservableGauge{}, nil
-}
-
-// RegisterCallback performs no operation.
-func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
- return Registration{}, nil
-}
-
-// Observer acts as a recorder of measurements for multiple instruments in a
-// Callback, it performing no operation.
-type Observer struct{ embedded.Observer }
-
-// ObserveFloat64 performs no operation.
-func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
-}
-
-// ObserveInt64 performs no operation.
-func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
-}
-
-// Registration is the registration of a Callback with a No-Op Meter.
-type Registration struct{ embedded.Registration }
-
-// Unregister unregisters the Callback the Registration represents with the
-// No-Op Meter. This will always return nil because the No-Op Meter performs no
-// operation, including hold any record of registrations.
-func (Registration) Unregister() error { return nil }
-
-// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
-// It produces no telemetry.
-type Int64Counter struct{ embedded.Int64Counter }
-
-// Add performs no operation.
-func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
-
-// Float64Counter is an OpenTelemetry Counter used to record float64
-// measurements. It produces no telemetry.
-type Float64Counter struct{ embedded.Float64Counter }
-
-// Add performs no operation.
-func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
-
-// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
-// measurements. It produces no telemetry.
-type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
-
-// Add performs no operation.
-func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
-
-// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
-// float64 measurements. It produces no telemetry.
-type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
-
-// Add performs no operation.
-func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
-
-// Int64Histogram is an OpenTelemetry Histogram used to record int64
-// measurements. It produces no telemetry.
-type Int64Histogram struct{ embedded.Int64Histogram }
-
-// Record performs no operation.
-func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
-
-// Float64Histogram is an OpenTelemetry Histogram used to record float64
-// measurements. It produces no telemetry.
-type Float64Histogram struct{ embedded.Float64Histogram }
-
-// Record performs no operation.
-func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
-
-// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64
-// measurements. It produces no telemetry.
-type Int64Gauge struct{ embedded.Int64Gauge }
-
-// Record performs no operation.
-func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {}
-
-// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64
-// measurements. It produces no telemetry.
-type Float64Gauge struct{ embedded.Float64Gauge }
-
-// Record performs no operation.
-func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {}
-
-// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
-// int64 measurements. It produces no telemetry.
-type Int64ObservableCounter struct {
- metric.Int64Observable
- embedded.Int64ObservableCounter
-}
-
-// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
-// float64 measurements. It produces no telemetry.
-type Float64ObservableCounter struct {
- metric.Float64Observable
- embedded.Float64ObservableCounter
-}
-
-// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
-// int64 measurements. It produces no telemetry.
-type Int64ObservableGauge struct {
- metric.Int64Observable
- embedded.Int64ObservableGauge
-}
-
-// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
-// float64 measurements. It produces no telemetry.
-type Float64ObservableGauge struct {
- metric.Float64Observable
- embedded.Float64ObservableGauge
-}
-
-// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
-// used to record int64 measurements. It produces no telemetry.
-type Int64ObservableUpDownCounter struct {
- metric.Int64Observable
- embedded.Int64ObservableUpDownCounter
-}
-
-// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
-// used to record float64 measurements. It produces no telemetry.
-type Float64ObservableUpDownCounter struct {
- metric.Float64Observable
- embedded.Float64ObservableUpDownCounter
-}
-
-// Int64Observer is a recorder of int64 measurements that performs no operation.
-type Int64Observer struct{ embedded.Int64Observer }
-
-// Observe performs no operation.
-func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}
-
-// Float64Observer is a recorder of float64 measurements that performs no
-// operation.
-type Float64Observer struct{ embedded.Float64Observer }
-
-// Observe performs no operation.
-func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
deleted file mode 100644
index 8403a4bad..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// Float64Counter is an instrument that records increasing float64 values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64Counter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64Counter
-
- // Add records a change to the counter.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Add(ctx context.Context, incr float64, options ...AddOption)
-}
-
-// Float64CounterConfig contains options for synchronous counter instruments that
-// record float64 values.
-type Float64CounterConfig struct {
- description string
- unit string
-}
-
-// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts
-// applied.
-func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig {
- var config Float64CounterConfig
- for _, o := range opts {
- config = o.applyFloat64Counter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64CounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64CounterConfig) Unit() string {
- return c.unit
-}
-
-// Float64CounterOption applies options to a [Float64CounterConfig]. See
-// [InstrumentOption] for other options that can be used as a
-// Float64CounterOption.
-type Float64CounterOption interface {
- applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
-}
-
-// Float64UpDownCounter is an instrument that records increasing or decreasing
-// float64 values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64UpDownCounter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64UpDownCounter
-
- // Add records a change to the counter.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Add(ctx context.Context, incr float64, options ...AddOption)
-}
-
-// Float64UpDownCounterConfig contains options for synchronous counter
-// instruments that record float64 values.
-type Float64UpDownCounterConfig struct {
- description string
- unit string
-}
-
-// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig]
-// with all opts applied.
-func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig {
- var config Float64UpDownCounterConfig
- for _, o := range opts {
- config = o.applyFloat64UpDownCounter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64UpDownCounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64UpDownCounterConfig) Unit() string {
- return c.unit
-}
-
-// Float64UpDownCounterOption applies options to a
-// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that
-// can be used as a Float64UpDownCounterOption.
-type Float64UpDownCounterOption interface {
- applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig
-}
-
-// Float64Histogram is an instrument that records a distribution of float64
-// values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64Histogram interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64Histogram
-
- // Record adds an additional value to the distribution.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Record(ctx context.Context, incr float64, options ...RecordOption)
-}
-
-// Float64HistogramConfig contains options for synchronous histogram
-// instruments that record float64 values.
-type Float64HistogramConfig struct {
- description string
- unit string
- explicitBucketBoundaries []float64
-}
-
-// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
-// opts applied.
-func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig {
- var config Float64HistogramConfig
- for _, o := range opts {
- config = o.applyFloat64Histogram(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64HistogramConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64HistogramConfig) Unit() string {
- return c.unit
-}
-
-// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
-func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
- return c.explicitBucketBoundaries
-}
-
-// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
-// [InstrumentOption] for other options that can be used as a
-// Float64HistogramOption.
-type Float64HistogramOption interface {
- applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
-}
-
-// Float64Gauge is an instrument that records instantaneous float64 values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64Gauge interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64Gauge
-
- // Record records the instantaneous value.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Record(ctx context.Context, value float64, options ...RecordOption)
-}
-
-// Float64GaugeConfig contains options for synchronous gauge instruments that
-// record float64 values.
-type Float64GaugeConfig struct {
- description string
- unit string
-}
-
-// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts
-// applied.
-func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig {
- var config Float64GaugeConfig
- for _, o := range opts {
- config = o.applyFloat64Gauge(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64GaugeConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64GaugeConfig) Unit() string {
- return c.unit
-}
-
-// Float64GaugeOption applies options to a [Float64GaugeConfig]. See
-// [InstrumentOption] for other options that can be used as a
-// Float64GaugeOption.
-type Float64GaugeOption interface {
- applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
deleted file mode 100644
index 783fdfba7..000000000
--- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// Int64Counter is an instrument that records increasing int64 values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64Counter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64Counter
-
- // Add records a change to the counter.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Add(ctx context.Context, incr int64, options ...AddOption)
-}
-
-// Int64CounterConfig contains options for synchronous counter instruments that
-// record int64 values.
-type Int64CounterConfig struct {
- description string
- unit string
-}
-
-// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts
-// applied.
-func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig {
- var config Int64CounterConfig
- for _, o := range opts {
- config = o.applyInt64Counter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64CounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64CounterConfig) Unit() string {
- return c.unit
-}
-
-// Int64CounterOption applies options to a [Int64CounterConfig]. See
-// [InstrumentOption] for other options that can be used as an
-// Int64CounterOption.
-type Int64CounterOption interface {
- applyInt64Counter(Int64CounterConfig) Int64CounterConfig
-}
-
-// Int64UpDownCounter is an instrument that records increasing or decreasing
-// int64 values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64UpDownCounter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64UpDownCounter
-
- // Add records a change to the counter.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Add(ctx context.Context, incr int64, options ...AddOption)
-}
-
-// Int64UpDownCounterConfig contains options for synchronous counter
-// instruments that record int64 values.
-type Int64UpDownCounterConfig struct {
- description string
- unit string
-}
-
-// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with
-// all opts applied.
-func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig {
- var config Int64UpDownCounterConfig
- for _, o := range opts {
- config = o.applyInt64UpDownCounter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64UpDownCounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64UpDownCounterConfig) Unit() string {
- return c.unit
-}
-
-// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig].
-// See [InstrumentOption] for other options that can be used as an
-// Int64UpDownCounterOption.
-type Int64UpDownCounterOption interface {
- applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig
-}
-
-// Int64Histogram is an instrument that records a distribution of int64
-// values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64Histogram interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64Histogram
-
- // Record adds an additional value to the distribution.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Record(ctx context.Context, incr int64, options ...RecordOption)
-}
-
-// Int64HistogramConfig contains options for synchronous histogram instruments
-// that record int64 values.
-type Int64HistogramConfig struct {
- description string
- unit string
- explicitBucketBoundaries []float64
-}
-
-// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
-// applied.
-func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig {
- var config Int64HistogramConfig
- for _, o := range opts {
- config = o.applyInt64Histogram(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64HistogramConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64HistogramConfig) Unit() string {
- return c.unit
-}
-
-// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
-func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
- return c.explicitBucketBoundaries
-}
-
-// Int64HistogramOption applies options to a [Int64HistogramConfig]. See
-// [InstrumentOption] for other options that can be used as an
-// Int64HistogramOption.
-type Int64HistogramOption interface {
- applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
-}
-
-// Int64Gauge is an instrument that records instantaneous int64 values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64Gauge interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64Gauge
-
- // Record records the instantaneous value.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Record(ctx context.Context, value int64, options ...RecordOption)
-}
-
-// Int64GaugeConfig contains options for synchronous gauge instruments that
-// record int64 values.
-type Int64GaugeConfig struct {
- description string
- unit string
-}
-
-// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts
-// applied.
-func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig {
- var config Int64GaugeConfig
- for _, o := range opts {
- config = o.applyInt64Gauge(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64GaugeConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64GaugeConfig) Unit() string {
- return c.unit
-}
-
-// Int64GaugeOption applies options to a [Int64GaugeConfig]. See
-// [InstrumentOption] for other options that can be used as a
-// Int64GaugeOption.
-type Int64GaugeOption interface {
- applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig
-}
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
deleted file mode 100644
index 2fd949733..000000000
--- a/vendor/go.opentelemetry.io/otel/propagation.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otel // import "go.opentelemetry.io/otel"
-
-import (
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/propagation"
-)
-
-// GetTextMapPropagator returns the global TextMapPropagator. If none has been
-// set, a No-Op TextMapPropagator is returned.
-func GetTextMapPropagator() propagation.TextMapPropagator {
- return global.TextMapPropagator()
-}
-
-// SetTextMapPropagator sets propagator as the global TextMapPropagator.
-func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
- global.SetTextMapPropagator(propagator)
-}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md
deleted file mode 100644
index e2959ac74..000000000
--- a/vendor/go.opentelemetry.io/otel/propagation/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Propagation
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation)
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
deleted file mode 100644
index 552263ba7..000000000
--- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package propagation // import "go.opentelemetry.io/otel/propagation"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/baggage"
-)
-
-const baggageHeader = "baggage"
-
-// Baggage is a propagator that supports the W3C Baggage format.
-//
-// This propagates user-defined baggage associated with a trace. The complete
-// specification is defined at https://www.w3.org/TR/baggage/.
-type Baggage struct{}
-
-var _ TextMapPropagator = Baggage{}
-
-// Inject sets baggage key-values from ctx into the carrier.
-func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
- bStr := baggage.FromContext(ctx).String()
- if bStr != "" {
- carrier.Set(baggageHeader, bStr)
- }
-}
-
-// Extract returns a copy of parent with the baggage from the carrier added.
-func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
- bStr := carrier.Get(baggageHeader)
- if bStr == "" {
- return parent
- }
-
- bag, err := baggage.Parse(bStr)
- if err != nil {
- return parent
- }
- return baggage.ContextWithBaggage(parent, bag)
-}
-
-// Fields returns the keys who's values are set with Inject.
-func (b Baggage) Fields() []string {
- return []string{baggageHeader}
-}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
deleted file mode 100644
index 33a3baf15..000000000
--- a/vendor/go.opentelemetry.io/otel/propagation/doc.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package propagation contains OpenTelemetry context propagators.
-
-OpenTelemetry propagators are used to extract and inject context data from and
-into messages exchanged by applications. The propagator supported by this
-package is the W3C Trace Context encoding
-(https://www.w3.org/TR/trace-context/), and W3C Baggage
-(https://www.w3.org/TR/baggage/).
-*/
-package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
deleted file mode 100644
index 8c8286aab..000000000
--- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package propagation // import "go.opentelemetry.io/otel/propagation"
-
-import (
- "context"
- "net/http"
-)
-
-// TextMapCarrier is the storage medium used by a TextMapPropagator.
-type TextMapCarrier interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Get returns the value associated with the passed key.
- Get(key string) string
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Set stores the key-value pair.
- Set(key string, value string)
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Keys lists the keys stored in this carrier.
- Keys() []string
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
-
-// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
-// medium for propagated key-value pairs.
-type MapCarrier map[string]string
-
-// Compile time check that MapCarrier implements the TextMapCarrier.
-var _ TextMapCarrier = MapCarrier{}
-
-// Get returns the value associated with the passed key.
-func (c MapCarrier) Get(key string) string {
- return c[key]
-}
-
-// Set stores the key-value pair.
-func (c MapCarrier) Set(key, value string) {
- c[key] = value
-}
-
-// Keys lists the keys stored in this carrier.
-func (c MapCarrier) Keys() []string {
- keys := make([]string, 0, len(c))
- for k := range c {
- keys = append(keys, k)
- }
- return keys
-}
-
-// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
-type HeaderCarrier http.Header
-
-// Get returns the value associated with the passed key.
-func (hc HeaderCarrier) Get(key string) string {
- return http.Header(hc).Get(key)
-}
-
-// Set stores the key-value pair.
-func (hc HeaderCarrier) Set(key string, value string) {
- http.Header(hc).Set(key, value)
-}
-
-// Keys lists the keys stored in this carrier.
-func (hc HeaderCarrier) Keys() []string {
- keys := make([]string, 0, len(hc))
- for k := range hc {
- keys = append(keys, k)
- }
- return keys
-}
-
-// TextMapPropagator propagates cross-cutting concerns as key-value text
-// pairs within a carrier that travels in-band across process boundaries.
-type TextMapPropagator interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Inject set cross-cutting concerns from the Context into the carrier.
- Inject(ctx context.Context, carrier TextMapCarrier)
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Extract reads cross-cutting concerns from the carrier into a Context.
- Extract(ctx context.Context, carrier TextMapCarrier) context.Context
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Fields returns the keys whose values are set with Inject.
- Fields() []string
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
-
-type compositeTextMapPropagator []TextMapPropagator
-
-func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
- for _, i := range p {
- i.Inject(ctx, carrier)
- }
-}
-
-func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
- for _, i := range p {
- ctx = i.Extract(ctx, carrier)
- }
- return ctx
-}
-
-func (p compositeTextMapPropagator) Fields() []string {
- unique := make(map[string]struct{})
- for _, i := range p {
- for _, k := range i.Fields() {
- unique[k] = struct{}{}
- }
- }
-
- fields := make([]string, 0, len(unique))
- for k := range unique {
- fields = append(fields, k)
- }
- return fields
-}
-
-// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
-// group of passed TextMapPropagator. This allows different cross-cutting
-// concerns to be propagates in a unified manner.
-//
-// The returned TextMapPropagator will inject and extract cross-cutting
-// concerns in the order the TextMapPropagators were provided. Additionally,
-// the Fields method will return a de-duplicated slice of the keys that are
-// set with the Inject method.
-func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
- return compositeTextMapPropagator(p)
-}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
deleted file mode 100644
index 6870e316d..000000000
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package propagation // import "go.opentelemetry.io/otel/propagation"
-
-import (
- "context"
- "encoding/hex"
- "fmt"
- "strings"
-
- "go.opentelemetry.io/otel/trace"
-)
-
-const (
- supportedVersion = 0
- maxVersion = 254
- traceparentHeader = "traceparent"
- tracestateHeader = "tracestate"
- delimiter = "-"
-)
-
-// TraceContext is a propagator that supports the W3C Trace Context format
-// (https://www.w3.org/TR/trace-context/)
-//
-// This propagator will propagate the traceparent and tracestate headers to
-// guarantee traces are not broken. It is up to the users of this propagator
-// to choose if they want to participate in a trace by modifying the
-// traceparent header and relevant parts of the tracestate header containing
-// their proprietary information.
-type TraceContext struct{}
-
-var (
- _ TextMapPropagator = TraceContext{}
- versionPart = fmt.Sprintf("%.2X", supportedVersion)
-)
-
-// Inject injects the trace context from ctx into carrier.
-func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
- sc := trace.SpanContextFromContext(ctx)
- if !sc.IsValid() {
- return
- }
-
- if ts := sc.TraceState().String(); ts != "" {
- carrier.Set(tracestateHeader, ts)
- }
-
- // Clear all flags other than the trace-context supported sampling bit.
- flags := sc.TraceFlags() & trace.FlagsSampled
-
- var sb strings.Builder
- sb.Grow(2 + 32 + 16 + 2 + 3)
- _, _ = sb.WriteString(versionPart)
- traceID := sc.TraceID()
- spanID := sc.SpanID()
- flagByte := [1]byte{byte(flags)}
- var buf [32]byte
- for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
- _ = sb.WriteByte(delimiter[0])
- n := hex.Encode(buf[:], src)
- _, _ = sb.Write(buf[:n])
- }
- carrier.Set(traceparentHeader, sb.String())
-}
-
-// Extract reads tracecontext from the carrier into a returned Context.
-//
-// The returned Context will be a copy of ctx and contain the extracted
-// tracecontext as the remote SpanContext. If the extracted tracecontext is
-// invalid, the passed ctx will be returned directly instead.
-func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
- sc := tc.extract(carrier)
- if !sc.IsValid() {
- return ctx
- }
- return trace.ContextWithRemoteSpanContext(ctx, sc)
-}
-
-func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
- h := carrier.Get(traceparentHeader)
- if h == "" {
- return trace.SpanContext{}
- }
-
- var ver [1]byte
- if !extractPart(ver[:], &h, 2) {
- return trace.SpanContext{}
- }
- version := int(ver[0])
- if version > maxVersion {
- return trace.SpanContext{}
- }
-
- var scc trace.SpanContextConfig
- if !extractPart(scc.TraceID[:], &h, 32) {
- return trace.SpanContext{}
- }
- if !extractPart(scc.SpanID[:], &h, 16) {
- return trace.SpanContext{}
- }
-
- var opts [1]byte
- if !extractPart(opts[:], &h, 2) {
- return trace.SpanContext{}
- }
- if version == 0 && (h != "" || opts[0] > 2) {
- // version 0 not allow extra
- // version 0 not allow other flag
- return trace.SpanContext{}
- }
-
- // Clear all flags other than the trace-context supported sampling bit.
- scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
-
- // Ignore the error returned here. Failure to parse tracestate MUST NOT
- // affect the parsing of traceparent according to the W3C tracecontext
- // specification.
- scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
- scc.Remote = true
-
- sc := trace.NewSpanContext(scc)
- if !sc.IsValid() {
- return trace.SpanContext{}
- }
-
- return sc
-}
-
-// upperHex detect hex is upper case Unicode characters.
-func upperHex(v string) bool {
- for _, c := range v {
- if c >= 'A' && c <= 'F' {
- return true
- }
- }
- return false
-}
-
-func extractPart(dst []byte, h *string, n int) bool {
- part, left, _ := strings.Cut(*h, delimiter)
- *h = left
- // hex.Decode decodes unsupported upper-case characters, so exclude explicitly.
- if len(part) != n || upperHex(part) {
- return false
- }
- if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
- return false
- }
- return true
-}
-
-// Fields returns the keys who's values are set with Inject.
-func (tc TraceContext) Fields() []string {
- return []string{traceparentHeader, tracestateHeader}
-}
diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json
deleted file mode 100644
index 4f80c898a..000000000
--- a/vendor/go.opentelemetry.io/otel/renovate.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
- "$schema": "https://docs.renovatebot.com/renovate-schema.json",
- "extends": [
- "config:recommended"
- ],
- "ignorePaths": [],
- "labels": ["Skip Changelog", "dependencies"],
- "postUpdateOptions" : [
- "gomodTidy"
- ],
- "packageRules": [
- {
- "matchManagers": ["gomod"],
- "matchDepTypes": ["indirect"],
- "enabled": true
- },
- {
- "matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
- "groupName": "googleapis"
- },
- {
- "matchPackageNames": ["golang.org/x/**"],
- "groupName": "golang.org/x"
- }
- ]
-}
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
deleted file mode 100644
index ab09daf9d..000000000
--- a/vendor/go.opentelemetry.io/otel/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-codespell==2.3.0
diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/README.md b/vendor/go.opentelemetry.io/otel/sdk/README.md
deleted file mode 100644
index f81b1576a..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# SDK
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
deleted file mode 100644
index 06e6d8685..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# SDK Instrumentation
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
deleted file mode 100644
index a4faa6a03..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package instrumentation provides types to represent the code libraries that
-// provide OpenTelemetry instrumentation. These types are used in the
-// OpenTelemetry signal pipelines to identify the source of telemetry.
-//
-// See
-// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md
-// and
-// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md
-// for more information.
-package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
deleted file mode 100644
index f2cdf3c65..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
-
-// Library represents the instrumentation library.
-//
-// Deprecated: use [Scope] instead.
-type Library = Scope
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
deleted file mode 100644
index 34852a47b..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Scope represents the instrumentation scope.
-type Scope struct {
- // Name is the name of the instrumentation scope. This should be the
- // Go package name of that scope.
- Name string
- // Version is the version of the instrumentation scope.
- Version string
- // SchemaURL of the telemetry emitted by the scope.
- SchemaURL string
- // Attributes of the telemetry emitted by the scope.
- Attributes attribute.Set
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
deleted file mode 100644
index 07923ed8d..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package env // import "go.opentelemetry.io/otel/sdk/internal/env"
-
-import (
- "os"
- "strconv"
-
- "go.opentelemetry.io/otel/internal/global"
-)
-
-// Environment variable names.
-const (
- // BatchSpanProcessorScheduleDelayKey is the delay interval between two
- // consecutive exports (i.e. 5000).
- BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY"
- // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to
- // export data (i.e. 3000).
- BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT"
- // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048).
- BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE"
- // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e.
- // 512). Note: it must be less than or equal to
- // BatchSpanProcessorMaxQueueSize.
- BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE"
-
- // AttributeValueLengthKey is the maximum allowed attribute value size.
- AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT"
-
- // AttributeCountKey is the maximum allowed span attribute count.
- AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT"
-
- // SpanAttributeValueLengthKey is the maximum allowed attribute value size
- // for a span.
- SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT"
-
- // SpanAttributeCountKey is the maximum allowed span attribute count for a
- // span.
- SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT"
-
- // SpanEventCountKey is the maximum allowed span event count.
- SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT"
-
- // SpanEventAttributeCountKey is the maximum allowed attribute per span
- // event count.
- SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"
-
- // SpanLinkCountKey is the maximum allowed span link count.
- SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT"
-
- // SpanLinkAttributeCountKey is the maximum allowed attribute per span
- // link count.
- SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"
-)
-
-// firstInt returns the value of the first matching environment variable from
-// keys. If the value is not an integer or no match is found, defaultValue is
-// returned.
-func firstInt(defaultValue int, keys ...string) int {
- for _, key := range keys {
- value := os.Getenv(key)
- if value == "" {
- continue
- }
-
- intValue, err := strconv.Atoi(value)
- if err != nil {
- global.Info("Got invalid value, number value expected.", key, value)
- return defaultValue
- }
-
- return intValue
- }
-
- return defaultValue
-}
-
-// IntEnvOr returns the int value of the environment variable with name key if
-// it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned.
-func IntEnvOr(key string, defaultValue int) int {
- value := os.Getenv(key)
- if value == "" {
- return defaultValue
- }
-
- intValue, err := strconv.Atoi(value)
- if err != nil {
- global.Info("Got invalid value, number value expected.", key, value)
- return defaultValue
- }
-
- return intValue
-}
-
-// BatchSpanProcessorScheduleDelay returns the environment variable value for
-// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is
-// returned.
-func BatchSpanProcessorScheduleDelay(defaultValue int) int {
- return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue)
-}
-
-// BatchSpanProcessorExportTimeout returns the environment variable value for
-// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is
-// returned.
-func BatchSpanProcessorExportTimeout(defaultValue int) int {
- return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue)
-}
-
-// BatchSpanProcessorMaxQueueSize returns the environment variable value for
-// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is
-// returned.
-func BatchSpanProcessorMaxQueueSize(defaultValue int) int {
- return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue)
-}
-
-// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for
-// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue
-// is returned.
-func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int {
- return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue)
-}
-
-// SpanAttributeValueLength returns the environment variable value for the
-// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
-// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is
-// returned or defaultValue if that is not set.
-func SpanAttributeValueLength(defaultValue int) int {
- return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey)
-}
-
-// SpanAttributeCount returns the environment variable value for the
-// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
-// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or
-// defaultValue if that is not set.
-func SpanAttributeCount(defaultValue int) int {
- return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey)
-}
-
-// SpanEventCount returns the environment variable value for the
-// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is
-// returned.
-func SpanEventCount(defaultValue int) int {
- return IntEnvOr(SpanEventCountKey, defaultValue)
-}
-
-// SpanEventAttributeCount returns the environment variable value for the
-// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue
-// is returned.
-func SpanEventAttributeCount(defaultValue int) int {
- return IntEnvOr(SpanEventAttributeCountKey, defaultValue)
-}
-
-// SpanLinkCount returns the environment variable value for the
-// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is
-// returned.
-func SpanLinkCount(defaultValue int) int {
- return IntEnvOr(SpanLinkCountKey, defaultValue)
-}
-
-// SpanLinkAttributeCount returns the environment variable value for the
-// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is
-// returned.
-func SpanLinkAttributeCount(defaultValue int) int {
- return IntEnvOr(SpanLinkAttributeCountKey, defaultValue)
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
deleted file mode 100644
index fab61647c..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Experimental Features
-
-The SDK contains features that have not yet stabilized in the OpenTelemetry specification.
-These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
-
-These feature may change in backwards incompatible ways as feedback is applied.
-See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
-
-## Features
-
-- [Resource](#resource)
-
-### Resource
-
-[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental.
-To have experimental semantic conventions be added by [resource detectors] set the `OTEL_GO_X_RESOURCE` environment variable.
-The value set must be the case-insensitive string of `"true"` to enable the feature.
-All other values are ignored.
-
-<!-- TODO: document what attributes are added by which detector -->
-
-[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/
-[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector
-
-#### Examples
-
-Enable experimental resource semantic conventions.
-
-```console
-export OTEL_GO_X_RESOURCE=true
-```
-
-Disable experimental resource semantic conventions.
-
-```console
-unset OTEL_GO_X_RESOURCE
-```
-
-## Compatibility and Stability
-
-Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md).
-These features may be removed or modified in successive version releases, including patch versions.
-
-When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
-There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
-If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
deleted file mode 100644
index 68d296cbe..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package x contains support for OTel SDK experimental features.
-//
-// This package should only be used for features defined in the specification.
-// It should not be used for experiments or new project ideas.
-package x // import "go.opentelemetry.io/otel/sdk/internal/x"
-
-import (
- "os"
- "strings"
-)
-
-// Resource is an experimental feature flag that defines if resource detectors
-// should be included experimental semantic conventions.
-//
-// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
-// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
-// will also enable this).
-var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
- if strings.ToLower(v) == "true" {
- return v, true
- }
- return "", false
-})
-
-// Feature is an experimental feature control flag. It provides a uniform way
-// to interact with these feature flags and parse their values.
-type Feature[T any] struct {
- key string
- parse func(v string) (T, bool)
-}
-
-func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
- const envKeyRoot = "OTEL_GO_X_"
- return Feature[T]{
- key: envKeyRoot + suffix,
- parse: parse,
- }
-}
-
-// Key returns the environment variable key that needs to be set to enable the
-// feature.
-func (f Feature[T]) Key() string { return f.key }
-
-// Lookup returns the user configured value for the feature and true if the
-// user has enabled the feature. Otherwise, if the feature is not enabled, a
-// zero-value and false are returned.
-func (f Feature[T]) Lookup() (v T, ok bool) {
- // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
- //
- // > The SDK MUST interpret an empty value of an environment variable the
- // > same way as when the variable is unset.
- vRaw := os.Getenv(f.key)
- if vRaw == "" {
- return v, ok
- }
- return f.parse(vRaw)
-}
-
-// Enabled returns if the feature is enabled.
-func (f Feature[T]) Enabled() bool {
- _, ok := f.Lookup()
- return ok
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/README.md
deleted file mode 100644
index 017f072a5..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Metric SDK
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go
deleted file mode 100644
index e6f5cfb2a..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "errors"
- "fmt"
- "slices"
-)
-
-// errAgg is wrapped by misconfigured aggregations.
-var errAgg = errors.New("aggregation")
-
-// Aggregation is the aggregation used to summarize recorded measurements.
-type Aggregation interface {
- // copy returns a deep copy of the Aggregation.
- copy() Aggregation
-
- // err returns an error for any misconfigured Aggregation.
- err() error
-}
-
-// AggregationDrop is an Aggregation that drops all recorded data.
-type AggregationDrop struct{} // AggregationDrop has no parameters.
-
-var _ Aggregation = AggregationDrop{}
-
-// copy returns a deep copy of d.
-func (d AggregationDrop) copy() Aggregation { return d }
-
-// err returns an error for any misconfiguration. A drop aggregation has no
-// parameters and cannot be misconfigured, therefore this always returns nil.
-func (AggregationDrop) err() error { return nil }
-
-// AggregationDefault is an Aggregation that uses the default instrument kind selection
-// mapping to select another Aggregation. A metric reader can be configured to
-// make an aggregation selection based on instrument kind that differs from
-// the default. This Aggregation ensures the default is used.
-//
-// See the [DefaultAggregationSelector] for information about the default
-// instrument kind selection mapping.
-type AggregationDefault struct{} // AggregationDefault has no parameters.
-
-var _ Aggregation = AggregationDefault{}
-
-// copy returns a deep copy of d.
-func (d AggregationDefault) copy() Aggregation { return d }
-
-// err returns an error for any misconfiguration. A default aggregation has no
-// parameters and cannot be misconfigured, therefore this always returns nil.
-func (AggregationDefault) err() error { return nil }
-
-// AggregationSum is an Aggregation that summarizes a set of measurements as their
-// arithmetic sum.
-type AggregationSum struct{} // AggregationSum has no parameters.
-
-var _ Aggregation = AggregationSum{}
-
-// copy returns a deep copy of s.
-func (s AggregationSum) copy() Aggregation { return s }
-
-// err returns an error for any misconfiguration. A sum aggregation has no
-// parameters and cannot be misconfigured, therefore this always returns nil.
-func (AggregationSum) err() error { return nil }
-
-// AggregationLastValue is an Aggregation that summarizes a set of measurements as the
-// last one made.
-type AggregationLastValue struct{} // AggregationLastValue has no parameters.
-
-var _ Aggregation = AggregationLastValue{}
-
-// copy returns a deep copy of l.
-func (l AggregationLastValue) copy() Aggregation { return l }
-
-// err returns an error for any misconfiguration. A last-value aggregation has
-// no parameters and cannot be misconfigured, therefore this always returns
-// nil.
-func (AggregationLastValue) err() error { return nil }
-
-// AggregationExplicitBucketHistogram is an Aggregation that summarizes a set of
-// measurements as an histogram with explicitly defined buckets.
-type AggregationExplicitBucketHistogram struct {
- // Boundaries are the increasing bucket boundary values. Boundary values
- // define bucket upper bounds. Buckets are exclusive of their lower
- // boundary and inclusive of their upper bound (except at positive
- // infinity). A measurement is defined to fall into the greatest-numbered
- // bucket with a boundary that is greater than or equal to the
- // measurement. As an example, boundaries defined as:
- //
- // []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}
- //
- // Will define these buckets:
- //
- // (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0],
- // (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0],
- // (500.0, 1000.0], (1000.0, +∞)
- Boundaries []float64
- // NoMinMax indicates whether to not record the min and max of the
- // distribution. By default, these extrema are recorded.
- //
- // Recording these extrema for cumulative data is expected to have little
- // value, they will represent the entire life of the instrument instead of
- // just the current collection cycle. It is recommended to set this to true
- // for that type of data to avoid computing the low-value extrema.
- NoMinMax bool
-}
-
-var _ Aggregation = AggregationExplicitBucketHistogram{}
-
-// errHist is returned by misconfigured ExplicitBucketHistograms.
-var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg)
-
-// err returns an error for any misconfiguration.
-func (h AggregationExplicitBucketHistogram) err() error {
- if len(h.Boundaries) <= 1 {
- return nil
- }
-
- // Check boundaries are monotonic.
- i := h.Boundaries[0]
- for _, j := range h.Boundaries[1:] {
- if i >= j {
- return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries)
- }
- i = j
- }
-
- return nil
-}
-
-// copy returns a deep copy of h.
-func (h AggregationExplicitBucketHistogram) copy() Aggregation {
- return AggregationExplicitBucketHistogram{
- Boundaries: slices.Clone(h.Boundaries),
- NoMinMax: h.NoMinMax,
- }
-}
-
-// AggregationBase2ExponentialHistogram is an Aggregation that summarizes a set of
-// measurements as an histogram with bucket widths that grow exponentially.
-type AggregationBase2ExponentialHistogram struct {
- // MaxSize is the maximum number of buckets to use for the histogram.
- MaxSize int32
- // MaxScale is the maximum resolution scale to use for the histogram.
- //
- // MaxScale has a maximum value of 20. Using a value of 20 means the
- // maximum number of buckets that can fit within the range of a
- // signed 32-bit integer index could be used.
- //
- // MaxScale has a minimum value of -10. Using a value of -10 means only
- // two buckets will be used.
- MaxScale int32
-
- // NoMinMax indicates whether to not record the min and max of the
- // distribution. By default, these extrema are recorded.
- //
- // Recording these extrema for cumulative data is expected to have little
- // value, they will represent the entire life of the instrument instead of
- // just the current collection cycle. It is recommended to set this to true
- // for that type of data to avoid computing the low-value extrema.
- NoMinMax bool
-}
-
-var _ Aggregation = AggregationBase2ExponentialHistogram{}
-
-// copy returns a deep copy of the Aggregation.
-func (e AggregationBase2ExponentialHistogram) copy() Aggregation {
- return e
-}
-
-const (
- expoMaxScale = 20
- expoMinScale = -10
-)
-
-// errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms.
-var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg)
-
-// err returns an error for any misconfigured Aggregation.
-func (e AggregationBase2ExponentialHistogram) err() error {
- if e.MaxScale > expoMaxScale {
- return fmt.Errorf("%w: max size %d is greater than maximum scale %d", errExpoHist, e.MaxSize, expoMaxScale)
- }
- if e.MaxSize <= 0 {
- return fmt.Errorf("%w: max size %d is less than or equal to zero", errExpoHist, e.MaxSize)
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
deleted file mode 100644
index 63b88f086..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "sync"
-)
-
-// cache is a locking storage used to quickly return already computed values.
-//
-// The zero value of a cache is empty and ready to use.
-//
-// A cache must not be copied after first use.
-//
-// All methods of a cache are safe to call concurrently.
-type cache[K comparable, V any] struct {
- sync.Mutex
- data map[K]V
-}
-
-// Lookup returns the value stored in the cache with the associated key if it
-// exists. Otherwise, f is called and its returned value is set in the cache
-// for key and returned.
-//
-// Lookup is safe to call concurrently. It will hold the cache lock, so f
-// should not block excessively.
-func (c *cache[K, V]) Lookup(key K, f func() V) V {
- c.Lock()
- defer c.Unlock()
-
- if c.data == nil {
- val := f()
- c.data = map[K]V{key: val}
- return val
- }
- if v, ok := c.data[key]; ok {
- return v
- }
- val := f()
- c.data[key] = val
- return val
-}
-
-// HasKey returns true if Lookup has previously been called with that key
-//
-// HasKey is safe to call concurrently.
-func (c *cache[K, V]) HasKey(key K) bool {
- c.Lock()
- defer c.Unlock()
- _, ok := c.data[key]
- return ok
-}
-
-// cacheWithErr is a locking storage used to quickly return already computed values and an error.
-//
-// The zero value of a cacheWithErr is empty and ready to use.
-//
-// A cacheWithErr must not be copied after first use.
-//
-// All methods of a cacheWithErr are safe to call concurrently.
-type cacheWithErr[K comparable, V any] struct {
- cache[K, valAndErr[V]]
-}
-
-type valAndErr[V any] struct {
- val V
- err error
-}
-
-// Lookup returns the value stored in the cacheWithErr with the associated key
-// if it exists. Otherwise, f is called and its returned value is set in the
-// cacheWithErr for key and returned.
-//
-// Lookup is safe to call concurrently. It will hold the cacheWithErr lock, so f
-// should not block excessively.
-func (c *cacheWithErr[K, V]) Lookup(key K, f func() (V, error)) (V, error) {
- combined := c.cache.Lookup(key, func() valAndErr[V] {
- val, err := f()
- return valAndErr[V]{val: val, err: err}
- })
- return combined.val, combined.err
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
deleted file mode 100644
index 203cd9d65..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "context"
- "errors"
- "os"
- "strings"
- "sync"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/sdk/metric/exemplar"
- "go.opentelemetry.io/otel/sdk/resource"
-)
-
-// config contains configuration options for a MeterProvider.
-type config struct {
- res *resource.Resource
- readers []Reader
- views []View
- exemplarFilter exemplar.Filter
-}
-
-// readerSignals returns a force-flush and shutdown function for a
-// MeterProvider to call in their respective options. All Readers c contains
-// will have their force-flush and shutdown methods unified into returned
-// single functions.
-func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) {
- var fFuncs, sFuncs []func(context.Context) error
- for _, r := range c.readers {
- sFuncs = append(sFuncs, r.Shutdown)
- if f, ok := r.(interface{ ForceFlush(context.Context) error }); ok {
- fFuncs = append(fFuncs, f.ForceFlush)
- }
- }
-
- return unify(fFuncs), unifyShutdown(sFuncs)
-}
-
-// unify unifies calling all of funcs into a single function call. All errors
-// returned from calls to funcs will be unify into a single error return
-// value.
-func unify(funcs []func(context.Context) error) func(context.Context) error {
- return func(ctx context.Context) error {
- var err error
- for _, f := range funcs {
- if e := f(ctx); e != nil {
- err = errors.Join(err, e)
- }
- }
- return err
- }
-}
-
-// unifyShutdown unifies calling all of funcs once for a shutdown. If called
-// more than once, an ErrReaderShutdown error is returned.
-func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error {
- f := unify(funcs)
- var once sync.Once
- return func(ctx context.Context) error {
- err := ErrReaderShutdown
- once.Do(func() { err = f(ctx) })
- return err
- }
-}
-
-// newConfig returns a config configured with options.
-func newConfig(options []Option) config {
- conf := config{
- res: resource.Default(),
- exemplarFilter: exemplar.TraceBasedFilter,
- }
- for _, o := range meterProviderOptionsFromEnv() {
- conf = o.apply(conf)
- }
- for _, o := range options {
- conf = o.apply(conf)
- }
- return conf
-}
-
-// Option applies a configuration option value to a MeterProvider.
-type Option interface {
- apply(config) config
-}
-
-// optionFunc applies a set of options to a config.
-type optionFunc func(config) config
-
-// apply returns a config with option(s) applied.
-func (o optionFunc) apply(conf config) config {
- return o(conf)
-}
-
-// WithResource associates a Resource with a MeterProvider. This Resource
-// represents the entity producing telemetry and is associated with all Meters
-// the MeterProvider will create.
-//
-// By default, if this Option is not used, the default Resource from the
-// go.opentelemetry.io/otel/sdk/resource package will be used.
-func WithResource(res *resource.Resource) Option {
- return optionFunc(func(conf config) config {
- var err error
- conf.res, err = resource.Merge(resource.Environment(), res)
- if err != nil {
- otel.Handle(err)
- }
- return conf
- })
-}
-
-// WithReader associates Reader r with a MeterProvider.
-//
-// By default, if this option is not used, the MeterProvider will perform no
-// operations; no data will be exported without a Reader.
-func WithReader(r Reader) Option {
- return optionFunc(func(cfg config) config {
- if r == nil {
- return cfg
- }
- cfg.readers = append(cfg.readers, r)
- return cfg
- })
-}
-
-// WithView associates views with a MeterProvider.
-//
-// Views are appended to existing ones in a MeterProvider if this option is
-// used multiple times.
-//
-// By default, if this option is not used, the MeterProvider will use the
-// default view.
-func WithView(views ...View) Option {
- return optionFunc(func(cfg config) config {
- cfg.views = append(cfg.views, views...)
- return cfg
- })
-}
-
-// WithExemplarFilter configures the exemplar filter.
-//
-// The exemplar filter determines which measurements are offered to the
-// exemplar reservoir, but the exemplar reservoir makes the final decision of
-// whether to store an exemplar.
-//
-// By default, the [exemplar.SampledFilter]
-// is used. Exemplars can be entirely disabled by providing the
-// [exemplar.AlwaysOffFilter].
-func WithExemplarFilter(filter exemplar.Filter) Option {
- return optionFunc(func(cfg config) config {
- cfg.exemplarFilter = filter
- return cfg
- })
-}
-
-func meterProviderOptionsFromEnv() []Option {
- var opts []Option
- // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
- const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
-
- switch strings.ToLower(strings.TrimSpace(os.Getenv(filterEnvKey))) {
- case "always_on":
- opts = append(opts, WithExemplarFilter(exemplar.AlwaysOnFilter))
- case "always_off":
- opts = append(opts, WithExemplarFilter(exemplar.AlwaysOffFilter))
- case "trace_based":
- opts = append(opts, WithExemplarFilter(exemplar.TraceBasedFilter))
- }
- return opts
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
deleted file mode 100644
index 90a4ae16c..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package metric provides an implementation of the OpenTelemetry metrics SDK.
-//
-// See https://opentelemetry.io/docs/concepts/signals/metrics/ for information
-// about the concept of OpenTelemetry metrics and
-// https://opentelemetry.io/docs/concepts/components/ for more information
-// about OpenTelemetry SDKs.
-//
-// The entry point for the metric package is the MeterProvider. It is the
-// object that all API calls use to create Meters, instruments, and ultimately
-// make metric measurements. Also, it is an object that should be used to
-// control the life-cycle (start, flush, and shutdown) of the SDK.
-//
-// A MeterProvider needs to be configured to export the measured data, this is
-// done by configuring it with a Reader implementation (using the WithReader
-// MeterProviderOption). Readers take two forms: ones that push to an endpoint
-// (NewPeriodicReader), and ones that an endpoint pulls from. See
-// [go.opentelemetry.io/otel/exporters] for exporters that can be used as
-// or with these Readers.
-//
-// Each Reader, when registered with the MeterProvider, can be augmented with a
-// View. Views allow users that run OpenTelemetry instrumented code to modify
-// the generated data of that instrumentation.
-//
-// The data generated by a MeterProvider needs to include information about its
-// origin. A MeterProvider needs to be configured with a Resource, using the
-// WithResource MeterProviderOption, to include this information. This Resource
-// should be used to describe the unique runtime environment instrumented code
-// is being run on. That way when multiple instances of the code are collected
-// at a single endpoint their origin is decipherable.
-//
-// To avoid leaking memory, the SDK returns the same instrument for calls to
-// create new instruments with the same Name, Unit, and Description.
-// Importantly, callbacks provided using metric.WithFloat64Callback or
-// metric.WithInt64Callback will only apply for the first instrument created
-// with a given Name, Unit, and Description. Instead, use
-// Meter.RegisterCallback and Registration.Unregister to add and remove
-// callbacks without leaking memory.
-//
-// See [go.opentelemetry.io/otel/metric] for more information about
-// the metric API.
-//
-// See [go.opentelemetry.io/otel/sdk/metric/internal/x] for information about
-// the experimental features.
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/env.go b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go
deleted file mode 100644
index a6c403797..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/env.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "os"
- "strconv"
- "time"
-
- "go.opentelemetry.io/otel/internal/global"
-)
-
-// Environment variable names.
-const (
- // The time interval (in milliseconds) between the start of two export attempts.
- envInterval = "OTEL_METRIC_EXPORT_INTERVAL"
- // Maximum allowed time (in milliseconds) to export data.
- envTimeout = "OTEL_METRIC_EXPORT_TIMEOUT"
-)
-
-// envDuration returns an environment variable's value as duration in milliseconds if it is exists,
-// or the defaultValue if the environment variable is not defined or the value is not valid.
-func envDuration(key string, defaultValue time.Duration) time.Duration {
- v := os.Getenv(key)
- if v == "" {
- return defaultValue
- }
- d, err := strconv.Atoi(v)
- if err != nil {
- global.Error(err, "parse duration", "environment variable", key, "value", v)
- return defaultValue
- }
- if d <= 0 {
- global.Error(errNonPositiveDuration, "non-positive duration", "environment variable", key, "value", v)
- return defaultValue
- }
- return time.Duration(d) * time.Millisecond
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
deleted file mode 100644
index 0335b8ae4..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "runtime"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/metric/exemplar"
- "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
-)
-
-// ExemplarReservoirProviderSelector selects the
-// [exemplar.ReservoirProvider] to use
-// based on the [Aggregation] of the metric.
-type ExemplarReservoirProviderSelector func(Aggregation) exemplar.ReservoirProvider
-
-// reservoirFunc returns the appropriately configured exemplar reservoir
-// creation func based on the passed InstrumentKind and filter configuration.
-func reservoirFunc[N int64 | float64](provider exemplar.ReservoirProvider, filter exemplar.Filter) func(attribute.Set) aggregate.FilteredExemplarReservoir[N] {
- return func(attrs attribute.Set) aggregate.FilteredExemplarReservoir[N] {
- return aggregate.NewFilteredExemplarReservoir[N](filter, provider(attrs))
- }
-}
-
-// DefaultExemplarReservoirProviderSelector returns the default
-// [exemplar.ReservoirProvider] for the
-// provided [Aggregation].
-//
-// For explicit bucket histograms with more than 1 bucket, it uses the
-// [exemplar.HistogramReservoirProvider].
-// For exponential histograms, it uses the
-// [exemplar.FixedSizeReservoirProvider]
-// with a size of min(20, max_buckets).
-// For all other aggregations, it uses the
-// [exemplar.FixedSizeReservoirProvider]
-// with a size equal to the number of CPUs.
-//
-// Exemplar default reservoirs MAY change in a minor version bump. No
-// guarantees are made on the shape or statistical properties of returned
-// exemplars.
-func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.ReservoirProvider {
- // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
- // Explicit bucket histogram aggregation with more than 1 bucket will
- // use AlignedHistogramBucketExemplarReservoir.
- a, ok := agg.(AggregationExplicitBucketHistogram)
- if ok && len(a.Boundaries) > 0 {
- return exemplar.HistogramReservoirProvider(a.Boundaries)
- }
-
- var n int
- if a, ok := agg.(AggregationBase2ExponentialHistogram); ok {
- // Base2 Exponential Histogram Aggregation SHOULD use a
- // SimpleFixedSizeExemplarReservoir with a reservoir equal to the
- // smaller of the maximum number of buckets configured on the
- // aggregation or twenty (e.g. min(20, max_buckets)).
- n = int(a.MaxSize)
- if n > 20 {
- n = 20
- }
- } else {
- // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
- // This Exemplar reservoir MAY take a configuration parameter for
- // the size of the reservoir. If no size configuration is
- // provided, the default size MAY be the number of possible
- // concurrent threads (e.g. number of CPUs) to help reduce
- // contention. Otherwise, a default size of 1 SHOULD be used.
- n = runtime.NumCPU()
- if n < 1 {
- // Should never be the case, but be defensive.
- n = 1
- }
- }
-
- return exemplar.FixedSizeReservoirProvider(n)
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md
deleted file mode 100644
index d1025f5eb..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Metric SDK Exemplars
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric/exemplar)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/exemplar)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go
deleted file mode 100644
index 9f2389376..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package exemplar provides an implementation of the OpenTelemetry exemplar
-// reservoir to be used in metric collection pipelines.
-package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go
deleted file mode 100644
index 1ab694678..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
-
-import (
- "time"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// Exemplar is a measurement sampled from a timeseries providing a typical
-// example.
-type Exemplar struct {
- // FilteredAttributes are the attributes recorded with the measurement but
- // filtered out of the timeseries' aggregated data.
- FilteredAttributes []attribute.KeyValue
- // Time is the time when the measurement was recorded.
- Time time.Time
- // Value is the measured value.
- Value Value
- // SpanID is the ID of the span that was active during the measurement. If
- // no span was active or the span was not sampled this will be empty.
- SpanID []byte `json:",omitempty"`
- // TraceID is the ID of the trace the active span belonged to during the
- // measurement. If no span was active or the span was not sampled this will
- // be empty.
- TraceID []byte `json:",omitempty"`
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go
deleted file mode 100644
index b595e2ace..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/trace"
-)
-
-// Filter determines if a measurement should be offered.
-//
-// The passed ctx needs to contain any baggage or span that were active
-// when the measurement was made. This information may be used by the
-// Reservoir in making a sampling decision.
-type Filter func(context.Context) bool
-
-// TraceBasedFilter is a [Filter] that will only offer measurements
-// if the passed context associated with the measurement contains a sampled
-// [go.opentelemetry.io/otel/trace.SpanContext].
-func TraceBasedFilter(ctx context.Context) bool {
- return trace.SpanContextFromContext(ctx).IsSampled()
-}
-
-// AlwaysOnFilter is a [Filter] that always offers measurements.
-func AlwaysOnFilter(ctx context.Context) bool {
- return true
-}
-
-// AlwaysOffFilter is a [Filter] that never offers measurements.
-func AlwaysOffFilter(ctx context.Context) bool {
- return false
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go
deleted file mode 100644
index d4aab0aad..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
-
-import (
- "context"
- "math"
- "math/rand"
- "time"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir].
-func FixedSizeReservoirProvider(k int) ReservoirProvider {
- return func(_ attribute.Set) Reservoir {
- return NewFixedSizeReservoir(k)
- }
-}
-
-// NewFixedSizeReservoir returns a [FixedSizeReservoir] that samples at most
-// k exemplars. If there are k or less measurements made, the Reservoir will
-// sample each one. If there are more than k, the Reservoir will then randomly
-// sample all additional measurement with a decreasing probability.
-func NewFixedSizeReservoir(k int) *FixedSizeReservoir {
- return newFixedSizeReservoir(newStorage(k))
-}
-
-var _ Reservoir = &FixedSizeReservoir{}
-
-// FixedSizeReservoir is a [Reservoir] that samples at most k exemplars. If
-// there are k or less measurements made, the Reservoir will sample each one.
-// If there are more than k, the Reservoir will then randomly sample all
-// additional measurement with a decreasing probability.
-type FixedSizeReservoir struct {
- *storage
-
- // count is the number of measurement seen.
- count int64
- // next is the next count that will store a measurement at a random index
- // once the reservoir has been filled.
- next int64
- // w is the largest random number in a distribution that is used to compute
- // the next next.
- w float64
-
- // rng is used to make sampling decisions.
- //
- // Do not use crypto/rand. There is no reason for the decrease in performance
- // given this is not a security sensitive decision.
- rng *rand.Rand
-}
-
-func newFixedSizeReservoir(s *storage) *FixedSizeReservoir {
- r := &FixedSizeReservoir{
- storage: s,
- rng: rand.New(rand.NewSource(time.Now().UnixNano())),
- }
- r.reset()
- return r
-}
-
-// randomFloat64 returns, as a float64, a uniform pseudo-random number in the
-// open interval (0.0,1.0).
-func (r *FixedSizeReservoir) randomFloat64() float64 {
- // TODO: This does not return a uniform number. rng.Float64 returns a
- // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it
- // returns multiples of 2^-53, and not all floating point numbers between 0
- // and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand
- // are always going to be 0).
- //
- // An alternative algorithm should be considered that will actually return
- // a uniform number in the interval (0,1). For example, since the default
- // rand source provides a uniform distribution for Int63, this can be
- // converted following the prototypical code of Mersenne Twister 64 (Takuji
- // Nishimura and Makoto Matsumoto:
- // http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c)
- //
- // (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0)
- //
- // There are likely many other methods to explore here as well.
-
- f := r.rng.Float64()
- for f == 0 {
- f = r.rng.Float64()
- }
- return f
-}
-
-// Offer accepts the parameters associated with a measurement. The
-// parameters will be stored as an exemplar if the Reservoir decides to
-// sample the measurement.
-//
-// The passed ctx needs to contain any baggage or span that were active
-// when the measurement was made. This information may be used by the
-// Reservoir in making a sampling decision.
-//
-// The time t is the time when the measurement was made. The v and a
-// parameters are the value and dropped (filtered) attributes of the
-// measurement respectively.
-func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) {
- // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December
- // 1994). "Reservoir-Sampling Algorithms of Time Complexity
- // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4):
- // 481–493 (https://dl.acm.org/doi/10.1145/198429.198435).
- //
- // A high-level overview of "Algorithm L":
- // 0) Pre-calculate the random count greater than the storage size when
- // an exemplar will be replaced.
- // 1) Accept all measurements offered until the configured storage size is
- // reached.
- // 2) Loop:
- // a) When the pre-calculate count is reached, replace a random
- // existing exemplar with the offered measurement.
- // b) Calculate the next random count greater than the existing one
- // which will replace another exemplars
- //
- // The way a "replacement" count is computed is by looking at `n` number of
- // independent random numbers each corresponding to an offered measurement.
- // Of these numbers the smallest `k` (the same size as the storage
- // capacity) of them are kept as a subset. The maximum value in this
- // subset, called `w` is used to weight another random number generation
- // for the next count that will be considered.
- //
- // By weighting the next count computation like described, it is able to
- // perform a uniformly-weighted sampling algorithm based on the number of
- // samples the reservoir has seen so far. The sampling will "slow down" as
- // more and more samples are offered so as to reduce a bias towards those
- // offered just prior to the end of the collection.
- //
- // This algorithm is preferred because of its balance of simplicity and
- // performance. It will compute three random numbers (the bulk of
- // computation time) for each item that becomes part of the reservoir, but
- // it does not spend any time on items that do not. In particular it has an
- // asymptotic runtime of O(k(1 + log(n/k)) where n is the number of
- // measurements offered and k is the reservoir size.
- //
- // See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of
- // this and other reservoir sampling algorithms. See
- // https://github.com/MrAlias/reservoir-sampling for a performance
- // comparison of reservoir sampling algorithms.
-
- if int(r.count) < cap(r.store) {
- r.store[r.count] = newMeasurement(ctx, t, n, a)
- } else {
- if r.count == r.next {
- // Overwrite a random existing measurement with the one offered.
- idx := int(r.rng.Int63n(int64(cap(r.store))))
- r.store[idx] = newMeasurement(ctx, t, n, a)
- r.advance()
- }
- }
- r.count++
-}
-
-// reset resets r to the initial state.
-func (r *FixedSizeReservoir) reset() {
- // This resets the number of exemplars known.
- r.count = 0
- // Random index inserts should only happen after the storage is full.
- r.next = int64(cap(r.store))
-
- // Initial random number in the series used to generate r.next.
- //
- // This is set before r.advance to reset or initialize the random number
- // series. Without doing so it would always be 0 or never restart a new
- // random number series.
- //
- // This maps the uniform random number in (0,1) to a geometric distribution
- // over the same interval. The mean of the distribution is inversely
- // proportional to the storage capacity.
- r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store)))
-
- r.advance()
-}
-
-// advance updates the count at which the offered measurement will overwrite an
-// existing exemplar.
-func (r *FixedSizeReservoir) advance() {
- // Calculate the next value in the random number series.
- //
- // The current value of r.w is based on the max of a distribution of random
- // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity
- // of the storage and each `u` in the interval (0,w)). To calculate the
- // next r.w we use the fact that when the next exemplar is selected to be
- // included in the storage an existing one will be dropped, and the
- // corresponding random number in the set used to calculate r.w will also
- // be replaced. The replacement random number will also be within (0,w),
- // therefore the next r.w will be based on the same distribution (i.e.
- // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by
- // computing the next random number `u` and take r.w as `w * u^(1/k)`.
- r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store)))
- // Use the new random number in the series to calculate the count of the
- // next measurement that will be stored.
- //
- // Given 0 < r.w < 1, each iteration will result in subsequent r.w being
- // smaller. This translates here into the next next being selected against
- // a distribution with a higher mean (i.e. the expected value will increase
- // and replacements become less likely)
- //
- // Important to note, the new r.next will always be at least 1 more than
- // the last r.next.
- r.next += int64(math.Log(r.randomFloat64())/math.Log(1-r.w)) + 1
-}
-
-// Collect returns all the held exemplars.
-//
-// The Reservoir state is preserved after this call.
-func (r *FixedSizeReservoir) Collect(dest *[]Exemplar) {
- r.storage.Collect(dest)
- // Call reset here even though it will reset r.count and restart the random
- // number series. This will persist any old exemplars as long as no new
- // measurements are offered, but it will also prioritize those new
- // measurements that are made over the older collection cycle ones.
- r.reset()
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go
deleted file mode 100644
index 3b76cf305..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
-
-import (
- "context"
- "slices"
- "sort"
- "time"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// HistogramReservoirProvider is a provider of [HistogramReservoir].
-func HistogramReservoirProvider(bounds []float64) ReservoirProvider {
- cp := slices.Clone(bounds)
- slices.Sort(cp)
- return func(_ attribute.Set) Reservoir {
- return NewHistogramReservoir(cp)
- }
-}
-
-// NewHistogramReservoir returns a [HistogramReservoir] that samples the last
-// measurement that falls within a histogram bucket. The histogram bucket
-// upper-boundaries are define by bounds.
-//
-// The passed bounds must be sorted before calling this function.
-func NewHistogramReservoir(bounds []float64) *HistogramReservoir {
- return &HistogramReservoir{
- bounds: bounds,
- storage: newStorage(len(bounds) + 1),
- }
-}
-
-var _ Reservoir = &HistogramReservoir{}
-
-// HistogramReservoir is a [Reservoir] that samples the last measurement that
-// falls within a histogram bucket. The histogram bucket upper-boundaries are
-// define by bounds.
-type HistogramReservoir struct {
- *storage
-
- // bounds are bucket bounds in ascending order.
- bounds []float64
-}
-
-// Offer accepts the parameters associated with a measurement. The
-// parameters will be stored as an exemplar if the Reservoir decides to
-// sample the measurement.
-//
-// The passed ctx needs to contain any baggage or span that were active
-// when the measurement was made. This information may be used by the
-// Reservoir in making a sampling decision.
-//
-// The time t is the time when the measurement was made. The v and a
-// parameters are the value and dropped (filtered) attributes of the
-// measurement respectively.
-func (r *HistogramReservoir) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) {
- var x float64
- switch v.Type() {
- case Int64ValueType:
- x = float64(v.Int64())
- case Float64ValueType:
- x = v.Float64()
- default:
- panic("unknown value type")
- }
- r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a)
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go
deleted file mode 100644
index ba5cd1a6b..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
-
-import (
- "context"
- "time"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// Reservoir holds the sampled exemplar of measurements made.
-type Reservoir interface {
- // Offer accepts the parameters associated with a measurement. The
- // parameters will be stored as an exemplar if the Reservoir decides to
- // sample the measurement.
- //
- // The passed ctx needs to contain any baggage or span that were active
- // when the measurement was made. This information may be used by the
- // Reservoir in making a sampling decision.
- //
- // The time t is the time when the measurement was made. The val and attr
- // parameters are the value and dropped (filtered) attributes of the
- // measurement respectively.
- Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue)
-
- // Collect returns all the held exemplars.
- //
- // The Reservoir state is preserved after this call.
- Collect(dest *[]Exemplar)
-}
-
-// ReservoirProvider creates new [Reservoir]s.
-//
-// The attributes provided are attributes which are kept by the aggregation, and
-// are exclusive with attributes passed to Offer. The combination of these
-// attributes and the attributes passed to Offer is the complete set of
-// attributes a measurement was made with.
-type ReservoirProvider func(attr attribute.Set) Reservoir
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go
deleted file mode 100644
index 0e2e26dfb..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
-
-import (
- "context"
- "time"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-// storage is an exemplar storage for [Reservoir] implementations.
-type storage struct {
- // store are the measurements sampled.
- //
- // This does not use []metricdata.Exemplar because it potentially would
- // require an allocation for trace and span IDs in the hot path of Offer.
- store []measurement
-}
-
-func newStorage(n int) *storage {
- return &storage{store: make([]measurement, n)}
-}
-
-// Collect returns all the held exemplars.
-//
-// The Reservoir state is preserved after this call.
-func (r *storage) Collect(dest *[]Exemplar) {
- *dest = reset(*dest, len(r.store), len(r.store))
- var n int
- for _, m := range r.store {
- if !m.valid {
- continue
- }
-
- m.exemplar(&(*dest)[n])
- n++
- }
- *dest = (*dest)[:n]
-}
-
-// measurement is a measurement made by a telemetry system.
-type measurement struct {
- // FilteredAttributes are the attributes dropped during the measurement.
- FilteredAttributes []attribute.KeyValue
- // Time is the time when the measurement was made.
- Time time.Time
- // Value is the value of the measurement.
- Value Value
- // SpanContext is the SpanContext active when a measurement was made.
- SpanContext trace.SpanContext
-
- valid bool
-}
-
-// newMeasurement returns a new non-empty Measurement.
-func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement {
- return measurement{
- FilteredAttributes: droppedAttr,
- Time: ts,
- Value: v,
- SpanContext: trace.SpanContextFromContext(ctx),
- valid: true,
- }
-}
-
-// exemplar returns m as an [Exemplar].
-func (m measurement) exemplar(dest *Exemplar) {
- dest.FilteredAttributes = m.FilteredAttributes
- dest.Time = m.Time
- dest.Value = m.Value
-
- if m.SpanContext.HasTraceID() {
- traceID := m.SpanContext.TraceID()
- dest.TraceID = traceID[:]
- } else {
- dest.TraceID = dest.TraceID[:0]
- }
-
- if m.SpanContext.HasSpanID() {
- spanID := m.SpanContext.SpanID()
- dest.SpanID = spanID[:]
- } else {
- dest.SpanID = dest.SpanID[:0]
- }
-}
-
-func reset[T any](s []T, length, capacity int) []T {
- if cap(s) < capacity {
- return make([]T, length, capacity)
- }
- return s[:length]
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go
deleted file mode 100644
index 590b089a8..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
-
-import "math"
-
-// ValueType identifies the type of value used in exemplar data.
-type ValueType uint8
-
-const (
- // UnknownValueType should not be used. It represents a misconfigured
- // Value.
- UnknownValueType ValueType = 0
- // Int64ValueType represents a Value with int64 data.
- Int64ValueType ValueType = 1
- // Float64ValueType represents a Value with float64 data.
- Float64ValueType ValueType = 2
-)
-
-// Value is the value of data held by an exemplar.
-type Value struct {
- t ValueType
- val uint64
-}
-
-// NewValue returns a new [Value] for the provided value.
-func NewValue[N int64 | float64](value N) Value {
- switch v := any(value).(type) {
- case int64:
- // This can be later converted back to int64 (overflow not checked).
- return Value{t: Int64ValueType, val: uint64(v)} // nolint:gosec
- case float64:
- return Value{t: Float64ValueType, val: math.Float64bits(v)}
- }
- return Value{}
-}
-
-// Type returns the [ValueType] of data held by v.
-func (v Value) Type() ValueType { return v.t }
-
-// Int64 returns the value of v as an int64. If the ValueType of v is not an
-// Int64ValueType, 0 is returned.
-func (v Value) Int64() int64 {
- if v.t == Int64ValueType {
- // Assumes the correct int64 was stored in v.val based on type.
- return int64(v.val) // nolint: gosec
- }
- return 0
-}
-
-// Float64 returns the value of v as an float64. If the ValueType of v is not
-// an Float64ValueType, 0 is returned.
-func (v Value) Float64() float64 {
- if v.t == Float64ValueType {
- return math.Float64frombits(v.val)
- }
- return 0
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
deleted file mode 100644
index 1969cb42c..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "context"
- "errors"
-
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-// ErrExporterShutdown is returned if Export or Shutdown are called after an
-// Exporter has been Shutdown.
-var ErrExporterShutdown = errors.New("exporter is shutdown")
-
-// Exporter handles the delivery of metric data to external receivers. This is
-// the final component in the metric push pipeline.
-type Exporter interface {
- // Temporality returns the Temporality to use for an instrument kind.
- //
- // This method needs to be concurrent safe with itself and all the other
- // Exporter methods.
- Temporality(InstrumentKind) metricdata.Temporality
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Aggregation returns the Aggregation to use for an instrument kind.
- //
- // This method needs to be concurrent safe with itself and all the other
- // Exporter methods.
- Aggregation(InstrumentKind) Aggregation
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Export serializes and transmits metric data to a receiver.
- //
- // This is called synchronously, there is no concurrency safety
- // requirement. Because of this, it is critical that all timeouts and
- // cancellations of the passed context be honored.
- //
- // All retry logic must be contained in this function. The SDK does not
- // implement any retry logic. All errors returned by this function are
- // considered unrecoverable and will be reported to a configured error
- // Handler.
- //
- // The passed ResourceMetrics may be reused when the call completes. If an
- // exporter needs to hold this data after it returns, it needs to make a
- // copy.
- Export(context.Context, *metricdata.ResourceMetrics) error
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // ForceFlush flushes any metric data held by an exporter.
- //
- // The deadline or cancellation of the passed context must be honored. An
- // appropriate error should be returned in these situations.
- //
- // This method needs to be concurrent safe.
- ForceFlush(context.Context) error
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Shutdown flushes all metric data held by an exporter and releases any
- // held computational resources.
- //
- // The deadline or cancellation of the passed context must be honored. An
- // appropriate error should be returned in these situations.
- //
- // After Shutdown is called, calls to Export will perform no operation and
- // instead will return an error indicating the shutdown state.
- //
- // This method needs to be concurrent safe.
- Shutdown(context.Context) error
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
deleted file mode 100644
index c33e1a28c..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
+++ /dev/null
@@ -1,364 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:generate stringer -type=InstrumentKind -trimprefix=InstrumentKind
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/embedded"
- "go.opentelemetry.io/otel/sdk/instrumentation"
- "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
- "go.opentelemetry.io/otel/sdk/metric/internal/x"
-)
-
-var zeroScope instrumentation.Scope
-
-// InstrumentKind is the identifier of a group of instruments that all
-// performing the same function.
-type InstrumentKind uint8
-
-const (
- // instrumentKindUndefined is an undefined instrument kind, it should not
- // be used by any initialized type.
- instrumentKindUndefined InstrumentKind = 0 // nolint:deadcode,varcheck,unused
- // InstrumentKindCounter identifies a group of instruments that record
- // increasing values synchronously with the code path they are measuring.
- InstrumentKindCounter InstrumentKind = 1
- // InstrumentKindUpDownCounter identifies a group of instruments that
- // record increasing and decreasing values synchronously with the code path
- // they are measuring.
- InstrumentKindUpDownCounter InstrumentKind = 2
- // InstrumentKindHistogram identifies a group of instruments that record a
- // distribution of values synchronously with the code path they are
- // measuring.
- InstrumentKindHistogram InstrumentKind = 3
- // InstrumentKindObservableCounter identifies a group of instruments that
- // record increasing values in an asynchronous callback.
- InstrumentKindObservableCounter InstrumentKind = 4
- // InstrumentKindObservableUpDownCounter identifies a group of instruments
- // that record increasing and decreasing values in an asynchronous
- // callback.
- InstrumentKindObservableUpDownCounter InstrumentKind = 5
- // InstrumentKindObservableGauge identifies a group of instruments that
- // record current values in an asynchronous callback.
- InstrumentKindObservableGauge InstrumentKind = 6
- // InstrumentKindGauge identifies a group of instruments that record
- // instantaneous values synchronously with the code path they are
- // measuring.
- InstrumentKindGauge InstrumentKind = 7
-)
-
-type nonComparable [0]func() // nolint: unused // This is indeed used.
-
-// Instrument describes properties an instrument is created with.
-type Instrument struct {
- // Name is the human-readable identifier of the instrument.
- Name string
- // Description describes the purpose of the instrument.
- Description string
- // Kind defines the functional group of the instrument.
- Kind InstrumentKind
- // Unit is the unit of measurement recorded by the instrument.
- Unit string
- // Scope identifies the instrumentation that created the instrument.
- Scope instrumentation.Scope
-
- // Ensure forward compatibility if non-comparable fields need to be added.
- nonComparable // nolint: unused
-}
-
-// IsEmpty returns if all Instrument fields are their zero-value.
-func (i Instrument) IsEmpty() bool {
- return i.Name == "" &&
- i.Description == "" &&
- i.Kind == instrumentKindUndefined &&
- i.Unit == "" &&
- i.Scope == zeroScope
-}
-
-// matches returns whether all the non-zero-value fields of i match the
-// corresponding fields of other. If i is empty it will match all other, and
-// true will always be returned.
-func (i Instrument) matches(other Instrument) bool {
- return i.matchesName(other) &&
- i.matchesDescription(other) &&
- i.matchesKind(other) &&
- i.matchesUnit(other) &&
- i.matchesScope(other)
-}
-
-// matchesName returns true if the Name of i is "" or it equals the Name of
-// other, otherwise false.
-func (i Instrument) matchesName(other Instrument) bool {
- return i.Name == "" || i.Name == other.Name
-}
-
-// matchesDescription returns true if the Description of i is "" or it equals
-// the Description of other, otherwise false.
-func (i Instrument) matchesDescription(other Instrument) bool {
- return i.Description == "" || i.Description == other.Description
-}
-
-// matchesKind returns true if the Kind of i is its zero-value or it equals the
-// Kind of other, otherwise false.
-func (i Instrument) matchesKind(other Instrument) bool {
- return i.Kind == instrumentKindUndefined || i.Kind == other.Kind
-}
-
-// matchesUnit returns true if the Unit of i is its zero-value or it equals the
-// Unit of other, otherwise false.
-func (i Instrument) matchesUnit(other Instrument) bool {
- return i.Unit == "" || i.Unit == other.Unit
-}
-
-// matchesScope returns true if the Scope of i is its zero-value or it equals
-// the Scope of other, otherwise false.
-func (i Instrument) matchesScope(other Instrument) bool {
- return (i.Scope.Name == "" || i.Scope.Name == other.Scope.Name) &&
- (i.Scope.Version == "" || i.Scope.Version == other.Scope.Version) &&
- (i.Scope.SchemaURL == "" || i.Scope.SchemaURL == other.Scope.SchemaURL)
-}
-
-// Stream describes the stream of data an instrument produces.
-type Stream struct {
- // Name is the human-readable identifier of the stream.
- Name string
- // Description describes the purpose of the data.
- Description string
- // Unit is the unit of measurement recorded.
- Unit string
- // Aggregation the stream uses for an instrument.
- Aggregation Aggregation
- // AttributeFilter is an attribute Filter applied to the attributes
- // recorded for an instrument's measurement. If the filter returns false
- // the attribute will not be recorded, otherwise, if it returns true, it
- // will record the attribute.
- //
- // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to
- // provide an allow-list of attribute keys here.
- AttributeFilter attribute.Filter
- // ExemplarReservoirProvider selects the
- // [go.opentelemetry.io/otel/sdk/metric/exemplar.ReservoirProvider] based
- // on the [Aggregation].
- //
- // If unspecified, [DefaultExemplarReservoirProviderSelector] is used.
- ExemplarReservoirProviderSelector ExemplarReservoirProviderSelector
-}
-
-// instID are the identifying properties of a instrument.
-type instID struct {
- // Name is the name of the stream.
- Name string
- // Description is the description of the stream.
- Description string
- // Kind defines the functional group of the instrument.
- Kind InstrumentKind
- // Unit is the unit of the stream.
- Unit string
- // Number is the number type of the stream.
- Number string
-}
-
-// Returns a normalized copy of the instID i.
-//
-// Instrument names are considered case-insensitive. Standardize the instrument
-// name to always be lowercase for the returned instID so it can be compared
-// without the name casing affecting the comparison.
-func (i instID) normalize() instID {
- i.Name = strings.ToLower(i.Name)
- return i
-}
-
-type int64Inst struct {
- measures []aggregate.Measure[int64]
-
- embedded.Int64Counter
- embedded.Int64UpDownCounter
- embedded.Int64Histogram
- embedded.Int64Gauge
-}
-
-var (
- _ metric.Int64Counter = (*int64Inst)(nil)
- _ metric.Int64UpDownCounter = (*int64Inst)(nil)
- _ metric.Int64Histogram = (*int64Inst)(nil)
- _ metric.Int64Gauge = (*int64Inst)(nil)
- _ x.EnabledInstrument = (*int64Inst)(nil)
-)
-
-func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) {
- c := metric.NewAddConfig(opts)
- i.aggregate(ctx, val, c.Attributes())
-}
-
-func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.RecordOption) {
- c := metric.NewRecordConfig(opts)
- i.aggregate(ctx, val, c.Attributes())
-}
-
-func (i *int64Inst) Enabled(_ context.Context) bool {
- return len(i.measures) != 0
-}
-
-func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method.
- for _, in := range i.measures {
- in(ctx, val, s)
- }
-}
-
-type float64Inst struct {
- measures []aggregate.Measure[float64]
-
- embedded.Float64Counter
- embedded.Float64UpDownCounter
- embedded.Float64Histogram
- embedded.Float64Gauge
-}
-
-var (
- _ metric.Float64Counter = (*float64Inst)(nil)
- _ metric.Float64UpDownCounter = (*float64Inst)(nil)
- _ metric.Float64Histogram = (*float64Inst)(nil)
- _ metric.Float64Gauge = (*float64Inst)(nil)
- _ x.EnabledInstrument = (*float64Inst)(nil)
-)
-
-func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) {
- c := metric.NewAddConfig(opts)
- i.aggregate(ctx, val, c.Attributes())
-}
-
-func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.RecordOption) {
- c := metric.NewRecordConfig(opts)
- i.aggregate(ctx, val, c.Attributes())
-}
-
-func (i *float64Inst) Enabled(_ context.Context) bool {
- return len(i.measures) != 0
-}
-
-func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) {
- for _, in := range i.measures {
- in(ctx, val, s)
- }
-}
-
-// observableID is a comparable unique identifier of an observable.
-type observableID[N int64 | float64] struct {
- name string
- description string
- kind InstrumentKind
- unit string
- scope instrumentation.Scope
-}
-
-type float64Observable struct {
- metric.Float64Observable
- *observable[float64]
-
- embedded.Float64ObservableCounter
- embedded.Float64ObservableUpDownCounter
- embedded.Float64ObservableGauge
-}
-
-var (
- _ metric.Float64ObservableCounter = float64Observable{}
- _ metric.Float64ObservableUpDownCounter = float64Observable{}
- _ metric.Float64ObservableGauge = float64Observable{}
-)
-
-func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string) float64Observable {
- return float64Observable{
- observable: newObservable[float64](m, kind, name, desc, u),
- }
-}
-
-type int64Observable struct {
- metric.Int64Observable
- *observable[int64]
-
- embedded.Int64ObservableCounter
- embedded.Int64ObservableUpDownCounter
- embedded.Int64ObservableGauge
-}
-
-var (
- _ metric.Int64ObservableCounter = int64Observable{}
- _ metric.Int64ObservableUpDownCounter = int64Observable{}
- _ metric.Int64ObservableGauge = int64Observable{}
-)
-
-func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int64Observable {
- return int64Observable{
- observable: newObservable[int64](m, kind, name, desc, u),
- }
-}
-
-type observable[N int64 | float64] struct {
- metric.Observable
- observableID[N]
-
- meter *meter
- measures measures[N]
- dropAggregation bool
-}
-
-func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] {
- return &observable[N]{
- observableID: observableID[N]{
- name: name,
- description: desc,
- kind: kind,
- unit: u,
- scope: m.scope,
- },
- meter: m,
- }
-}
-
-// observe records the val for the set of attrs.
-func (o *observable[N]) observe(val N, s attribute.Set) {
- o.measures.observe(val, s)
-}
-
-func (o *observable[N]) appendMeasures(meas []aggregate.Measure[N]) {
- o.measures = append(o.measures, meas...)
-}
-
-type measures[N int64 | float64] []aggregate.Measure[N]
-
-// observe records the val for the set of attrs.
-func (m measures[N]) observe(val N, s attribute.Set) {
- for _, in := range m {
- in(context.Background(), val, s)
- }
-}
-
-var errEmptyAgg = errors.New("no aggregators for observable instrument")
-
-// registerable returns an error if the observable o should not be registered,
-// and nil if it should. An errEmptyAgg error is returned if o is effectively a
-// no-op because it does not have any aggregators. Also, an error is returned
-// if scope defines a Meter other than the one o was created by.
-func (o *observable[N]) registerable(m *meter) error {
- if len(o.measures) == 0 {
- return errEmptyAgg
- }
- if m != o.meter {
- return fmt.Errorf(
- "invalid registration: observable %q from Meter %q, registered with Meter %q",
- o.name,
- o.scope.Name,
- m.scope.Name,
- )
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
deleted file mode 100644
index 25ea6244e..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Code generated by "stringer -type=InstrumentKind -trimprefix=InstrumentKind"; DO NOT EDIT.
-
-package metric
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[instrumentKindUndefined-0]
- _ = x[InstrumentKindCounter-1]
- _ = x[InstrumentKindUpDownCounter-2]
- _ = x[InstrumentKindHistogram-3]
- _ = x[InstrumentKindObservableCounter-4]
- _ = x[InstrumentKindObservableUpDownCounter-5]
- _ = x[InstrumentKindObservableGauge-6]
- _ = x[InstrumentKindGauge-7]
-}
-
-const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGaugeGauge"
-
-var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112}
-
-func (i InstrumentKind) String() string {
- if i >= InstrumentKind(len(_InstrumentKind_index)-1) {
- return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
deleted file mode 100644
index fde219333..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
-
-import (
- "context"
- "time"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-// now is used to return the current local time while allowing tests to
-// override the default time.Now function.
-var now = time.Now
-
-// Measure receives measurements to be aggregated.
-type Measure[N int64 | float64] func(context.Context, N, attribute.Set)
-
-// ComputeAggregation stores the aggregate of measurements into dest and
-// returns the number of aggregate data-points output.
-type ComputeAggregation func(dest *metricdata.Aggregation) int
-
-// Builder builds an aggregate function.
-type Builder[N int64 | float64] struct {
- // Temporality is the temporality used for the returned aggregate function.
- //
- // If this is not provided a default of cumulative will be used (except for
- // the last-value aggregate function where delta is the only appropriate
- // temporality).
- Temporality metricdata.Temporality
- // Filter is the attribute filter the aggregate function will use on the
- // input of measurements.
- Filter attribute.Filter
- // ReservoirFunc is the factory function used by aggregate functions to
- // create new exemplar reservoirs for a new seen attribute set.
- //
- // If this is not provided a default factory function that returns an
- // dropReservoir reservoir will be used.
- ReservoirFunc func(attribute.Set) FilteredExemplarReservoir[N]
- // AggregationLimit is the cardinality limit of measurement attributes. Any
- // measurement for new attributes once the limit has been reached will be
- // aggregated into a single aggregate for the "otel.metric.overflow"
- // attribute.
- //
- // If AggregationLimit is less than or equal to zero there will not be an
- // aggregation limit imposed (i.e. unlimited attribute sets).
- AggregationLimit int
-}
-
-func (b Builder[N]) resFunc() func(attribute.Set) FilteredExemplarReservoir[N] {
- if b.ReservoirFunc != nil {
- return b.ReservoirFunc
- }
-
- return dropReservoir
-}
-
-type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)
-
-func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] {
- if b.Filter != nil {
- fltr := b.Filter // Copy to make it immutable after assignment.
- return func(ctx context.Context, n N, a attribute.Set) {
- fAttr, dropped := a.Filter(fltr)
- f(ctx, n, fAttr, dropped)
- }
- }
- return func(ctx context.Context, n N, a attribute.Set) {
- f(ctx, n, a, nil)
- }
-}
-
-// LastValue returns a last-value aggregate function input and output.
-func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
- lv := newLastValue[N](b.AggregationLimit, b.resFunc())
- switch b.Temporality {
- case metricdata.DeltaTemporality:
- return b.filter(lv.measure), lv.delta
- default:
- return b.filter(lv.measure), lv.cumulative
- }
-}
-
-// PrecomputedLastValue returns a last-value aggregate function input and
-// output. The aggregation returned from the returned ComputeAggregation
-// function will always only return values from the previous collection cycle.
-func (b Builder[N]) PrecomputedLastValue() (Measure[N], ComputeAggregation) {
- lv := newPrecomputedLastValue[N](b.AggregationLimit, b.resFunc())
- switch b.Temporality {
- case metricdata.DeltaTemporality:
- return b.filter(lv.measure), lv.delta
- default:
- return b.filter(lv.measure), lv.cumulative
- }
-}
-
-// PrecomputedSum returns a sum aggregate function input and output. The
-// arguments passed to the input are expected to be the precomputed sum values.
-func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) {
- s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc())
- switch b.Temporality {
- case metricdata.DeltaTemporality:
- return b.filter(s.measure), s.delta
- default:
- return b.filter(s.measure), s.cumulative
- }
-}
-
-// Sum returns a sum aggregate function input and output.
-func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
- s := newSum[N](monotonic, b.AggregationLimit, b.resFunc())
- switch b.Temporality {
- case metricdata.DeltaTemporality:
- return b.filter(s.measure), s.delta
- default:
- return b.filter(s.measure), s.cumulative
- }
-}
-
-// ExplicitBucketHistogram returns a histogram aggregate function input and
-// output.
-func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
- h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
- switch b.Temporality {
- case metricdata.DeltaTemporality:
- return b.filter(h.measure), h.delta
- default:
- return b.filter(h.measure), h.cumulative
- }
-}
-
-// ExponentialBucketHistogram returns a histogram aggregate function input and
-// output.
-func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
- h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc())
- switch b.Temporality {
- case metricdata.DeltaTemporality:
- return b.filter(h.measure), h.delta
- default:
- return b.filter(h.measure), h.cumulative
- }
-}
-
-// reset ensures s has capacity and sets it length. If the capacity of s too
-// small, a new slice is returned with the specified capacity and length.
-func reset[T any](s []T, length, capacity int) []T {
- if cap(s) < capacity {
- return make([]T, length, capacity)
- }
- return s[:length]
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
deleted file mode 100644
index 7b7225e6e..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package aggregate provides aggregate types used compute aggregations and
-// cycle the state of metric measurements made by the SDK. These types and
-// functionality are meant only for internal SDK use.
-package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go
deleted file mode 100644
index 8396faaa4..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/metric/exemplar"
-)
-
-// dropReservoir returns a [FilteredReservoir] that drops all measurements it is offered.
-func dropReservoir[N int64 | float64](attribute.Set) FilteredExemplarReservoir[N] {
- return &dropRes[N]{}
-}
-
-type dropRes[N int64 | float64] struct{}
-
-// Offer does nothing, all measurements offered will be dropped.
-func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {}
-
-// Collect resets dest. No exemplars will ever be returned.
-func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) {
- clear(*dest) // Erase elements to let GC collect objects
- *dest = (*dest)[:0]
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go
deleted file mode 100644
index 25d709948..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
-
-import (
- "sync"
-
- "go.opentelemetry.io/otel/sdk/metric/exemplar"
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-var exemplarPool = sync.Pool{
- New: func() any { return new([]exemplar.Exemplar) },
-}
-
-func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) {
- dest := exemplarPool.Get().(*[]exemplar.Exemplar)
- defer func() {
- clear(*dest) // Erase elements to let GC collect objects.
- *dest = (*dest)[:0]
- exemplarPool.Put(dest)
- }()
-
- *dest = reset(*dest, len(*out), cap(*out))
-
- f(dest)
-
- *out = reset(*out, len(*dest), cap(*dest))
- for i, e := range *dest {
- (*out)[i].FilteredAttributes = e.FilteredAttributes
- (*out)[i].Time = e.Time
- (*out)[i].SpanID = e.SpanID
- (*out)[i].TraceID = e.TraceID
-
- switch e.Value.Type() {
- case exemplar.Int64ValueType:
- (*out)[i].Value = N(e.Value.Int64())
- case exemplar.Float64ValueType:
- (*out)[i].Value = N(e.Value.Float64())
- }
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
deleted file mode 100644
index 336ea91d1..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
+++ /dev/null
@@ -1,443 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
-
-import (
- "context"
- "errors"
- "math"
- "sync"
- "time"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-const (
- expoMaxScale = 20
- expoMinScale = -10
-
- smallestNonZeroNormalFloat64 = 0x1p-1022
-
- // These redefine the Math constants with a type, so the compiler won't coerce
- // them into an int on 32 bit platforms.
- maxInt64 int64 = math.MaxInt64
- minInt64 int64 = math.MinInt64
-)
-
-// expoHistogramDataPoint is a single data point in an exponential histogram.
-type expoHistogramDataPoint[N int64 | float64] struct {
- attrs attribute.Set
- res FilteredExemplarReservoir[N]
-
- count uint64
- min N
- max N
- sum N
-
- maxSize int
- noMinMax bool
- noSum bool
-
- scale int32
-
- posBuckets expoBuckets
- negBuckets expoBuckets
- zeroCount uint64
-}
-
-func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
- f := math.MaxFloat64
- ma := N(f) // if N is int64, max will overflow to -9223372036854775808
- mi := N(-f)
- if N(maxInt64) > N(f) {
- ma = N(maxInt64)
- mi = N(minInt64)
- }
- return &expoHistogramDataPoint[N]{
- attrs: attrs,
- min: ma,
- max: mi,
- maxSize: maxSize,
- noMinMax: noMinMax,
- noSum: noSum,
- scale: maxScale,
- }
-}
-
-// record adds a new measurement to the histogram. It will rescale the buckets if needed.
-func (p *expoHistogramDataPoint[N]) record(v N) {
- p.count++
-
- if !p.noMinMax {
- if v < p.min {
- p.min = v
- }
- if v > p.max {
- p.max = v
- }
- }
- if !p.noSum {
- p.sum += v
- }
-
- absV := math.Abs(float64(v))
-
- if float64(absV) == 0.0 {
- p.zeroCount++
- return
- }
-
- bin := p.getBin(absV)
-
- bucket := &p.posBuckets
- if v < 0 {
- bucket = &p.negBuckets
- }
-
- // If the new bin would make the counts larger than maxScale, we need to
- // downscale current measurements.
- if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 {
- if p.scale-scaleDelta < expoMinScale {
- // With a scale of -10 there is only two buckets for the whole range of float64 values.
- // This can only happen if there is a max size of 1.
- otel.Handle(errors.New("exponential histogram scale underflow"))
- return
- }
- // Downscale
- p.scale -= scaleDelta
- p.posBuckets.downscale(scaleDelta)
- p.negBuckets.downscale(scaleDelta)
-
- bin = p.getBin(absV)
- }
-
- bucket.record(bin)
-}
-
-// getBin returns the bin v should be recorded into.
-func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 {
- frac, expInt := math.Frexp(v)
- // 11-bit exponential.
- exp := int32(expInt) // nolint: gosec
- if p.scale <= 0 {
- // Because of the choice of fraction is always 1 power of two higher than we want.
- var correction int32 = 1
- if frac == .5 {
- // If v is an exact power of two the frac will be .5 and the exp
- // will be one higher than we want.
- correction = 2
- }
- return (exp - correction) >> (-p.scale)
- }
- return exp<<p.scale + int32(math.Log(frac)*scaleFactors[p.scale]) - 1
-}
-
-// scaleFactors are constants used in calculating the logarithm index. They are
-// equivalent to 2^index/log(2).
-var scaleFactors = [21]float64{
- math.Ldexp(math.Log2E, 0),
- math.Ldexp(math.Log2E, 1),
- math.Ldexp(math.Log2E, 2),
- math.Ldexp(math.Log2E, 3),
- math.Ldexp(math.Log2E, 4),
- math.Ldexp(math.Log2E, 5),
- math.Ldexp(math.Log2E, 6),
- math.Ldexp(math.Log2E, 7),
- math.Ldexp(math.Log2E, 8),
- math.Ldexp(math.Log2E, 9),
- math.Ldexp(math.Log2E, 10),
- math.Ldexp(math.Log2E, 11),
- math.Ldexp(math.Log2E, 12),
- math.Ldexp(math.Log2E, 13),
- math.Ldexp(math.Log2E, 14),
- math.Ldexp(math.Log2E, 15),
- math.Ldexp(math.Log2E, 16),
- math.Ldexp(math.Log2E, 17),
- math.Ldexp(math.Log2E, 18),
- math.Ldexp(math.Log2E, 19),
- math.Ldexp(math.Log2E, 20),
-}
-
-// scaleChange returns the magnitude of the scale change needed to fit bin in
-// the bucket. If no scale change is needed 0 is returned.
-func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int) int32 {
- if length == 0 {
- // No need to rescale if there are no buckets.
- return 0
- }
-
- low := int(startBin)
- high := int(bin)
- if startBin >= bin {
- low = int(bin)
- high = int(startBin) + length - 1
- }
-
- var count int32
- for high-low >= p.maxSize {
- low = low >> 1
- high = high >> 1
- count++
- if count > expoMaxScale-expoMinScale {
- return count
- }
- }
- return count
-}
-
-// expoBuckets is a set of buckets in an exponential histogram.
-type expoBuckets struct {
- startBin int32
- counts []uint64
-}
-
-// record increments the count for the given bin, and expands the buckets if needed.
-// Size changes must be done before calling this function.
-func (b *expoBuckets) record(bin int32) {
- if len(b.counts) == 0 {
- b.counts = []uint64{1}
- b.startBin = bin
- return
- }
-
- endBin := int(b.startBin) + len(b.counts) - 1
-
- // if the new bin is inside the current range
- if bin >= b.startBin && int(bin) <= endBin {
- b.counts[bin-b.startBin]++
- return
- }
- // if the new bin is before the current start add spaces to the counts
- if bin < b.startBin {
- origLen := len(b.counts)
- newLength := endBin - int(bin) + 1
- shift := b.startBin - bin
-
- if newLength > cap(b.counts) {
- b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
- }
-
- copy(b.counts[shift:origLen+int(shift)], b.counts[:])
- b.counts = b.counts[:newLength]
- for i := 1; i < int(shift); i++ {
- b.counts[i] = 0
- }
- b.startBin = bin
- b.counts[0] = 1
- return
- }
- // if the new is after the end add spaces to the end
- if int(bin) > endBin {
- if int(bin-b.startBin) < cap(b.counts) {
- b.counts = b.counts[:bin-b.startBin+1]
- for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ {
- b.counts[i] = 0
- }
- b.counts[bin-b.startBin] = 1
- return
- }
-
- end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1)
- b.counts = append(b.counts, end...)
- b.counts[bin-b.startBin] = 1
- }
-}
-
-// downscale shrinks a bucket by a factor of 2*s. It will sum counts into the
-// correct lower resolution bucket.
-func (b *expoBuckets) downscale(delta int32) {
- // Example
- // delta = 2
- // Original offset: -6
- // Counts: [ 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
- // bins: -6 -5, -4, -3, -2, -1, 0, 1, 2, 3, 4
- // new bins:-2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1
- // new Offset: -2
- // new Counts: [4, 14, 30, 10]
-
- if len(b.counts) <= 1 || delta < 1 {
- b.startBin = b.startBin >> delta
- return
- }
-
- steps := int32(1) << delta
- offset := b.startBin % steps
- offset = (offset + steps) % steps // to make offset positive
- for i := 1; i < len(b.counts); i++ {
- idx := i + int(offset)
- if idx%int(steps) == 0 {
- b.counts[idx/int(steps)] = b.counts[i]
- continue
- }
- b.counts[idx/int(steps)] += b.counts[i]
- }
-
- lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps)
- b.counts = b.counts[:lastIdx+1]
- b.startBin = b.startBin >> delta
-}
-
-// newExponentialHistogram returns an Aggregator that summarizes a set of
-// measurements as an exponential histogram. Each histogram is scoped by attributes
-// and the aggregation cycle the measurements were made in.
-func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *expoHistogram[N] {
- return &expoHistogram[N]{
- noSum: noSum,
- noMinMax: noMinMax,
- maxSize: int(maxSize),
- maxScale: maxScale,
-
- newRes: r,
- limit: newLimiter[*expoHistogramDataPoint[N]](limit),
- values: make(map[attribute.Distinct]*expoHistogramDataPoint[N]),
-
- start: now(),
- }
-}
-
-// expoHistogram summarizes a set of measurements as an histogram with exponentially
-// defined buckets.
-type expoHistogram[N int64 | float64] struct {
- noSum bool
- noMinMax bool
- maxSize int
- maxScale int32
-
- newRes func(attribute.Set) FilteredExemplarReservoir[N]
- limit limiter[*expoHistogramDataPoint[N]]
- values map[attribute.Distinct]*expoHistogramDataPoint[N]
- valuesMu sync.Mutex
-
- start time.Time
-}
-
-func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
- // Ignore NaN and infinity.
- if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) {
- return
- }
-
- e.valuesMu.Lock()
- defer e.valuesMu.Unlock()
-
- attr := e.limit.Attributes(fltrAttr, e.values)
- v, ok := e.values[attr.Equivalent()]
- if !ok {
- v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum)
- v.res = e.newRes(attr)
-
- e.values[attr.Equivalent()] = v
- }
- v.record(value)
- v.res.Offer(ctx, value, droppedAttr)
-}
-
-func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
- t := now()
-
- // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
- // In that case, use the zero-value h and hope for better alignment next cycle.
- h, _ := (*dest).(metricdata.ExponentialHistogram[N])
- h.Temporality = metricdata.DeltaTemporality
-
- e.valuesMu.Lock()
- defer e.valuesMu.Unlock()
-
- n := len(e.values)
- hDPts := reset(h.DataPoints, n, n)
-
- var i int
- for _, val := range e.values {
- hDPts[i].Attributes = val.attrs
- hDPts[i].StartTime = e.start
- hDPts[i].Time = t
- hDPts[i].Count = val.count
- hDPts[i].Scale = val.scale
- hDPts[i].ZeroCount = val.zeroCount
- hDPts[i].ZeroThreshold = 0.0
-
- hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
- hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
- copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
-
- hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
- hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
- copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
-
- if !e.noSum {
- hDPts[i].Sum = val.sum
- }
- if !e.noMinMax {
- hDPts[i].Min = metricdata.NewExtrema(val.min)
- hDPts[i].Max = metricdata.NewExtrema(val.max)
- }
-
- collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
-
- i++
- }
- // Unused attribute sets do not report.
- clear(e.values)
-
- e.start = t
- h.DataPoints = hDPts
- *dest = h
- return n
-}
-
-func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
- t := now()
-
- // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
- // In that case, use the zero-value h and hope for better alignment next cycle.
- h, _ := (*dest).(metricdata.ExponentialHistogram[N])
- h.Temporality = metricdata.CumulativeTemporality
-
- e.valuesMu.Lock()
- defer e.valuesMu.Unlock()
-
- n := len(e.values)
- hDPts := reset(h.DataPoints, n, n)
-
- var i int
- for _, val := range e.values {
- hDPts[i].Attributes = val.attrs
- hDPts[i].StartTime = e.start
- hDPts[i].Time = t
- hDPts[i].Count = val.count
- hDPts[i].Scale = val.scale
- hDPts[i].ZeroCount = val.zeroCount
- hDPts[i].ZeroThreshold = 0.0
-
- hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
- hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
- copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
-
- hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
- hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
- copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
-
- if !e.noSum {
- hDPts[i].Sum = val.sum
- }
- if !e.noMinMax {
- hDPts[i].Min = metricdata.NewExtrema(val.min)
- hDPts[i].Max = metricdata.NewExtrema(val.max)
- }
-
- collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
-
- i++
- // TODO (#3006): This will use an unbounded amount of memory if there
- // are unbounded number of attribute sets being aggregated. Attribute
- // sets that become "stale" need to be forgotten so this will not
- // overload the system.
- }
-
- h.DataPoints = hDPts
- *dest = h
- return n
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go
deleted file mode 100644
index 691a91060..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
-
-import (
- "context"
- "time"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/metric/exemplar"
-)
-
-// FilteredExemplarReservoir wraps a [exemplar.Reservoir] with a filter.
-type FilteredExemplarReservoir[N int64 | float64] interface {
- // Offer accepts the parameters associated with a measurement. The
- // parameters will be stored as an exemplar if the filter decides to
- // sample the measurement.
- //
- // The passed ctx needs to contain any baggage or span that were active
- // when the measurement was made. This information may be used by the
- // Reservoir in making a sampling decision.
- Offer(ctx context.Context, val N, attr []attribute.KeyValue)
- // Collect returns all the held exemplars in the reservoir.
- Collect(dest *[]exemplar.Exemplar)
-}
-
-// filteredExemplarReservoir handles the pre-sampled exemplar of measurements made.
-type filteredExemplarReservoir[N int64 | float64] struct {
- filter exemplar.Filter
- reservoir exemplar.Reservoir
-}
-
-// NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values
-// that are allowed by the filter.
-func NewFilteredExemplarReservoir[N int64 | float64](f exemplar.Filter, r exemplar.Reservoir) FilteredExemplarReservoir[N] {
- return &filteredExemplarReservoir[N]{
- filter: f,
- reservoir: r,
- }
-}
-
-func (f *filteredExemplarReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) {
- if f.filter(ctx) {
- // only record the current time if we are sampling this measurement.
- f.reservoir.Offer(ctx, time.Now(), exemplar.NewValue(val), attr)
- }
-}
-
-func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { f.reservoir.Collect(dest) }
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
deleted file mode 100644
index d577ae2c1..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
+++ /dev/null
@@ -1,232 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
-
-import (
- "context"
- "slices"
- "sort"
- "sync"
- "time"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-type buckets[N int64 | float64] struct {
- attrs attribute.Set
- res FilteredExemplarReservoir[N]
-
- counts []uint64
- count uint64
- total N
- min, max N
-}
-
-// newBuckets returns buckets with n bins.
-func newBuckets[N int64 | float64](attrs attribute.Set, n int) *buckets[N] {
- return &buckets[N]{attrs: attrs, counts: make([]uint64, n)}
-}
-
-func (b *buckets[N]) sum(value N) { b.total += value }
-
-func (b *buckets[N]) bin(idx int, value N) {
- b.counts[idx]++
- b.count++
- if value < b.min {
- b.min = value
- } else if value > b.max {
- b.max = value
- }
-}
-
-// histValues summarizes a set of measurements as an histValues with
-// explicitly defined buckets.
-type histValues[N int64 | float64] struct {
- noSum bool
- bounds []float64
-
- newRes func(attribute.Set) FilteredExemplarReservoir[N]
- limit limiter[*buckets[N]]
- values map[attribute.Distinct]*buckets[N]
- valuesMu sync.Mutex
-}
-
-func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histValues[N] {
- // The responsibility of keeping all buckets correctly associated with the
- // passed boundaries is ultimately this type's responsibility. Make a copy
- // here so we can always guarantee this. Or, in the case of failure, have
- // complete control over the fix.
- b := slices.Clone(bounds)
- slices.Sort(b)
- return &histValues[N]{
- noSum: noSum,
- bounds: b,
- newRes: r,
- limit: newLimiter[*buckets[N]](limit),
- values: make(map[attribute.Distinct]*buckets[N]),
- }
-}
-
-// Aggregate records the measurement value, scoped by attr, and aggregates it
-// into a histogram.
-func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
- // This search will return an index in the range [0, len(s.bounds)], where
- // it will return len(s.bounds) if value is greater than the last element
- // of s.bounds. This aligns with the buckets in that the length of buckets
- // is len(s.bounds)+1, with the last bucket representing:
- // (s.bounds[len(s.bounds)-1], +∞).
- idx := sort.SearchFloat64s(s.bounds, float64(value))
-
- s.valuesMu.Lock()
- defer s.valuesMu.Unlock()
-
- attr := s.limit.Attributes(fltrAttr, s.values)
- b, ok := s.values[attr.Equivalent()]
- if !ok {
- // N+1 buckets. For example:
- //
- // bounds = [0, 5, 10]
- //
- // Then,
- //
- // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
- b = newBuckets[N](attr, len(s.bounds)+1)
- b.res = s.newRes(attr)
-
- // Ensure min and max are recorded values (not zero), for new buckets.
- b.min, b.max = value, value
- s.values[attr.Equivalent()] = b
- }
- b.bin(idx, value)
- if !s.noSum {
- b.sum(value)
- }
- b.res.Offer(ctx, value, droppedAttr)
-}
-
-// newHistogram returns an Aggregator that summarizes a set of measurements as
-// an histogram.
-func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histogram[N] {
- return &histogram[N]{
- histValues: newHistValues[N](boundaries, noSum, limit, r),
- noMinMax: noMinMax,
- start: now(),
- }
-}
-
-// histogram summarizes a set of measurements as an histogram with explicitly
-// defined buckets.
-type histogram[N int64 | float64] struct {
- *histValues[N]
-
- noMinMax bool
- start time.Time
-}
-
-func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
- t := now()
-
- // If *dest is not a metricdata.Histogram, memory reuse is missed. In that
- // case, use the zero-value h and hope for better alignment next cycle.
- h, _ := (*dest).(metricdata.Histogram[N])
- h.Temporality = metricdata.DeltaTemporality
-
- s.valuesMu.Lock()
- defer s.valuesMu.Unlock()
-
- // Do not allow modification of our copy of bounds.
- bounds := slices.Clone(s.bounds)
-
- n := len(s.values)
- hDPts := reset(h.DataPoints, n, n)
-
- var i int
- for _, val := range s.values {
- hDPts[i].Attributes = val.attrs
- hDPts[i].StartTime = s.start
- hDPts[i].Time = t
- hDPts[i].Count = val.count
- hDPts[i].Bounds = bounds
- hDPts[i].BucketCounts = val.counts
-
- if !s.noSum {
- hDPts[i].Sum = val.total
- }
-
- if !s.noMinMax {
- hDPts[i].Min = metricdata.NewExtrema(val.min)
- hDPts[i].Max = metricdata.NewExtrema(val.max)
- }
-
- collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
-
- i++
- }
- // Unused attribute sets do not report.
- clear(s.values)
- // The delta collection cycle resets.
- s.start = t
-
- h.DataPoints = hDPts
- *dest = h
-
- return n
-}
-
-func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int {
- t := now()
-
- // If *dest is not a metricdata.Histogram, memory reuse is missed. In that
- // case, use the zero-value h and hope for better alignment next cycle.
- h, _ := (*dest).(metricdata.Histogram[N])
- h.Temporality = metricdata.CumulativeTemporality
-
- s.valuesMu.Lock()
- defer s.valuesMu.Unlock()
-
- // Do not allow modification of our copy of bounds.
- bounds := slices.Clone(s.bounds)
-
- n := len(s.values)
- hDPts := reset(h.DataPoints, n, n)
-
- var i int
- for _, val := range s.values {
- hDPts[i].Attributes = val.attrs
- hDPts[i].StartTime = s.start
- hDPts[i].Time = t
- hDPts[i].Count = val.count
- hDPts[i].Bounds = bounds
-
- // The HistogramDataPoint field values returned need to be copies of
- // the buckets value as we will keep updating them.
- //
- // TODO (#3047): Making copies for bounds and counts incurs a large
- // memory allocation footprint. Alternatives should be explored.
- hDPts[i].BucketCounts = slices.Clone(val.counts)
-
- if !s.noSum {
- hDPts[i].Sum = val.total
- }
-
- if !s.noMinMax {
- hDPts[i].Min = metricdata.NewExtrema(val.min)
- hDPts[i].Max = metricdata.NewExtrema(val.max)
- }
-
- collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
-
- i++
- // TODO (#3006): This will use an unbounded amount of memory if there
- // are unbounded number of attribute sets being aggregated. Attribute
- // sets that become "stale" need to be forgotten so this will not
- // overload the system.
- }
-
- h.DataPoints = hDPts
- *dest = h
-
- return n
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
deleted file mode 100644
index d3a93f085..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
-
-import (
- "context"
- "sync"
- "time"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-// datapoint is timestamped measurement data.
-type datapoint[N int64 | float64] struct {
- attrs attribute.Set
- value N
- res FilteredExemplarReservoir[N]
-}
-
-func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *lastValue[N] {
- return &lastValue[N]{
- newRes: r,
- limit: newLimiter[datapoint[N]](limit),
- values: make(map[attribute.Distinct]datapoint[N]),
- start: now(),
- }
-}
-
-// lastValue summarizes a set of measurements as the last one made.
-type lastValue[N int64 | float64] struct {
- sync.Mutex
-
- newRes func(attribute.Set) FilteredExemplarReservoir[N]
- limit limiter[datapoint[N]]
- values map[attribute.Distinct]datapoint[N]
- start time.Time
-}
-
-func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
- s.Lock()
- defer s.Unlock()
-
- attr := s.limit.Attributes(fltrAttr, s.values)
- d, ok := s.values[attr.Equivalent()]
- if !ok {
- d.res = s.newRes(attr)
- }
-
- d.attrs = attr
- d.value = value
- d.res.Offer(ctx, value, droppedAttr)
-
- s.values[attr.Equivalent()] = d
-}
-
-func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int {
- t := now()
- // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
- // the DataPoints is missed (better luck next time).
- gData, _ := (*dest).(metricdata.Gauge[N])
-
- s.Lock()
- defer s.Unlock()
-
- n := s.copyDpts(&gData.DataPoints, t)
- // Do not report stale values.
- clear(s.values)
- // Update start time for delta temporality.
- s.start = t
-
- *dest = gData
-
- return n
-}
-
-func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int {
- t := now()
- // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
- // the DataPoints is missed (better luck next time).
- gData, _ := (*dest).(metricdata.Gauge[N])
-
- s.Lock()
- defer s.Unlock()
-
- n := s.copyDpts(&gData.DataPoints, t)
- // TODO (#3006): This will use an unbounded amount of memory if there
- // are unbounded number of attribute sets being aggregated. Attribute
- // sets that become "stale" need to be forgotten so this will not
- // overload the system.
- *dest = gData
-
- return n
-}
-
-// copyDpts copies the datapoints held by s into dest. The number of datapoints
-// copied is returned.
-func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) int {
- n := len(s.values)
- *dest = reset(*dest, n, n)
-
- var i int
- for _, v := range s.values {
- (*dest)[i].Attributes = v.attrs
- (*dest)[i].StartTime = s.start
- (*dest)[i].Time = t
- (*dest)[i].Value = v.value
- collectExemplars(&(*dest)[i].Exemplars, v.res.Collect)
- i++
- }
- return n
-}
-
-// newPrecomputedLastValue returns an aggregator that summarizes a set of
-// observations as the last one made.
-func newPrecomputedLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedLastValue[N] {
- return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)}
-}
-
-// precomputedLastValue summarizes a set of observations as the last one made.
-type precomputedLastValue[N int64 | float64] struct {
- *lastValue[N]
-}
-
-func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int {
- t := now()
- // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
- // the DataPoints is missed (better luck next time).
- gData, _ := (*dest).(metricdata.Gauge[N])
-
- s.Lock()
- defer s.Unlock()
-
- n := s.copyDpts(&gData.DataPoints, t)
- // Do not report stale values.
- clear(s.values)
- // Update start time for delta temporality.
- s.start = t
-
- *dest = gData
-
- return n
-}
-
-func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int {
- t := now()
- // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
- // the DataPoints is missed (better luck next time).
- gData, _ := (*dest).(metricdata.Gauge[N])
-
- s.Lock()
- defer s.Unlock()
-
- n := s.copyDpts(&gData.DataPoints, t)
- // Do not report stale values.
- clear(s.values)
- *dest = gData
-
- return n
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
deleted file mode 100644
index 9ea0251ed..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// overflowSet is the attribute set used to record a measurement when adding
-// another distinct attribute set to the aggregate would exceed the aggregate
-// limit.
-var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))
-
-// limiter limits aggregate values.
-type limiter[V any] struct {
- // aggLimit is the maximum number of metric streams that can be aggregated.
- //
- // Any metric stream with attributes distinct from any set already
- // aggregated once the aggLimit will be meet will instead be aggregated
- // into an "overflow" metric stream. That stream will only contain the
- // "otel.metric.overflow"=true attribute.
- aggLimit int
-}
-
-// newLimiter returns a new Limiter with the provided aggregation limit.
-func newLimiter[V any](aggregation int) limiter[V] {
- return limiter[V]{aggLimit: aggregation}
-}
-
-// Attributes checks if adding a measurement for attrs will exceed the
-// aggregation cardinality limit for the existing measurements. If it will,
-// overflowSet is returned. Otherwise, if it will not exceed the limit, or the
-// limit is not set (limit <= 0), attr is returned.
-func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]V) attribute.Set {
- if l.aggLimit > 0 {
- _, exists := measurements[attrs.Equivalent()]
- if !exists && len(measurements) >= l.aggLimit-1 {
- return overflowSet
- }
- }
-
- return attrs
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
deleted file mode 100644
index 8e132ad61..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
-
-import (
- "context"
- "sync"
- "time"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-type sumValue[N int64 | float64] struct {
- n N
- res FilteredExemplarReservoir[N]
- attrs attribute.Set
-}
-
-// valueMap is the storage for sums.
-type valueMap[N int64 | float64] struct {
- sync.Mutex
- newRes func(attribute.Set) FilteredExemplarReservoir[N]
- limit limiter[sumValue[N]]
- values map[attribute.Distinct]sumValue[N]
-}
-
-func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] {
- return &valueMap[N]{
- newRes: r,
- limit: newLimiter[sumValue[N]](limit),
- values: make(map[attribute.Distinct]sumValue[N]),
- }
-}
-
-func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
- s.Lock()
- defer s.Unlock()
-
- attr := s.limit.Attributes(fltrAttr, s.values)
- v, ok := s.values[attr.Equivalent()]
- if !ok {
- v.res = s.newRes(attr)
- }
-
- v.attrs = attr
- v.n += value
- v.res.Offer(ctx, value, droppedAttr)
-
- s.values[attr.Equivalent()] = v
-}
-
-// newSum returns an aggregator that summarizes a set of measurements as their
-// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
-// the measurements were made in.
-func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] {
- return &sum[N]{
- valueMap: newValueMap[N](limit, r),
- monotonic: monotonic,
- start: now(),
- }
-}
-
-// sum summarizes a set of measurements made as their arithmetic sum.
-type sum[N int64 | float64] struct {
- *valueMap[N]
-
- monotonic bool
- start time.Time
-}
-
-func (s *sum[N]) delta(dest *metricdata.Aggregation) int {
- t := now()
-
- // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
- // use the zero-value sData and hope for better alignment next cycle.
- sData, _ := (*dest).(metricdata.Sum[N])
- sData.Temporality = metricdata.DeltaTemporality
- sData.IsMonotonic = s.monotonic
-
- s.Lock()
- defer s.Unlock()
-
- n := len(s.values)
- dPts := reset(sData.DataPoints, n, n)
-
- var i int
- for _, val := range s.values {
- dPts[i].Attributes = val.attrs
- dPts[i].StartTime = s.start
- dPts[i].Time = t
- dPts[i].Value = val.n
- collectExemplars(&dPts[i].Exemplars, val.res.Collect)
- i++
- }
- // Do not report stale values.
- clear(s.values)
- // The delta collection cycle resets.
- s.start = t
-
- sData.DataPoints = dPts
- *dest = sData
-
- return n
-}
-
-func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
- t := now()
-
- // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
- // use the zero-value sData and hope for better alignment next cycle.
- sData, _ := (*dest).(metricdata.Sum[N])
- sData.Temporality = metricdata.CumulativeTemporality
- sData.IsMonotonic = s.monotonic
-
- s.Lock()
- defer s.Unlock()
-
- n := len(s.values)
- dPts := reset(sData.DataPoints, n, n)
-
- var i int
- for _, value := range s.values {
- dPts[i].Attributes = value.attrs
- dPts[i].StartTime = s.start
- dPts[i].Time = t
- dPts[i].Value = value.n
- collectExemplars(&dPts[i].Exemplars, value.res.Collect)
- // TODO (#3006): This will use an unbounded amount of memory if there
- // are unbounded number of attribute sets being aggregated. Attribute
- // sets that become "stale" need to be forgotten so this will not
- // overload the system.
- i++
- }
-
- sData.DataPoints = dPts
- *dest = sData
-
- return n
-}
-
-// newPrecomputedSum returns an aggregator that summarizes a set of
-// observations as their arithmetic sum. Each sum is scoped by attributes and
-// the aggregation cycle the measurements were made in.
-func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedSum[N] {
- return &precomputedSum[N]{
- valueMap: newValueMap[N](limit, r),
- monotonic: monotonic,
- start: now(),
- }
-}
-
-// precomputedSum summarizes a set of observations as their arithmetic sum.
-type precomputedSum[N int64 | float64] struct {
- *valueMap[N]
-
- monotonic bool
- start time.Time
-
- reported map[attribute.Distinct]N
-}
-
-func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int {
- t := now()
- newReported := make(map[attribute.Distinct]N)
-
- // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
- // use the zero-value sData and hope for better alignment next cycle.
- sData, _ := (*dest).(metricdata.Sum[N])
- sData.Temporality = metricdata.DeltaTemporality
- sData.IsMonotonic = s.monotonic
-
- s.Lock()
- defer s.Unlock()
-
- n := len(s.values)
- dPts := reset(sData.DataPoints, n, n)
-
- var i int
- for key, value := range s.values {
- delta := value.n - s.reported[key]
-
- dPts[i].Attributes = value.attrs
- dPts[i].StartTime = s.start
- dPts[i].Time = t
- dPts[i].Value = delta
- collectExemplars(&dPts[i].Exemplars, value.res.Collect)
-
- newReported[key] = value.n
- i++
- }
- // Unused attribute sets do not report.
- clear(s.values)
- s.reported = newReported
- // The delta collection cycle resets.
- s.start = t
-
- sData.DataPoints = dPts
- *dest = sData
-
- return n
-}
-
-func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int {
- t := now()
-
- // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
- // use the zero-value sData and hope for better alignment next cycle.
- sData, _ := (*dest).(metricdata.Sum[N])
- sData.Temporality = metricdata.CumulativeTemporality
- sData.IsMonotonic = s.monotonic
-
- s.Lock()
- defer s.Unlock()
-
- n := len(s.values)
- dPts := reset(sData.DataPoints, n, n)
-
- var i int
- for _, val := range s.values {
- dPts[i].Attributes = val.attrs
- dPts[i].StartTime = s.start
- dPts[i].Time = t
- dPts[i].Value = val.n
- collectExemplars(&dPts[i].Exemplars, val.res.Collect)
-
- i++
- }
- // Unused attribute sets do not report.
- clear(s.values)
-
- sData.DataPoints = dPts
- *dest = sData
-
- return n
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
deleted file mode 100644
index 19ec6806f..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
-
-// ReuseSlice returns a zeroed view of slice if its capacity is greater than or
-// equal to n. Otherwise, it returns a new []T with capacity equal to n.
-func ReuseSlice[T any](slice []T, n int) []T {
- if cap(slice) >= n {
- return slice[:n]
- }
- return make([]T, n)
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
deleted file mode 100644
index 59f736b73..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-# Experimental Features
-
-The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
-These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
-
-These feature may change in backwards incompatible ways as feedback is applied.
-See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
-
-## Features
-
-- [Cardinality Limit](#cardinality-limit)
-- [Exemplars](#exemplars)
-- [Instrument Enabled](#instrument-enabled)
-
-### Cardinality Limit
-
-The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument.
-
-This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment value.
-The value must be an integer value.
-All other values are ignored.
-
-If the value set is less than or equal to `0`, no limit will be applied.
-
-#### Examples
-
-Set the cardinality limit to 2000.
-
-```console
-export OTEL_GO_X_CARDINALITY_LIMIT=2000
-```
-
-Set an infinite cardinality limit (functionally equivalent to disabling the feature).
-
-```console
-export OTEL_GO_X_CARDINALITY_LIMIT=-1
-```
-
-Disable the cardinality limit.
-
-```console
-unset OTEL_GO_X_CARDINALITY_LIMIT
-```
-
-### Exemplars
-
-A sample of measurements made may be exported directly as a set of exemplars.
-
-This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable.
-The value of must be the case-insensitive string of `"true"` to enable the feature.
-All other values are ignored.
-
-Exemplar filters are a supported.
-The exemplar filter applies to all measurements made.
-They filter these measurements, only allowing certain measurements to be passed to the underlying exemplar reservoir.
-
-To change the exemplar filter from the default `"trace_based"` filter set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable.
-The value must be the case-sensitive string defined by the [OpenTelemetry specification].
-
-- `"always_on"`: allows all measurements
-- `"always_off"`: denies all measurements
-- `"trace_based"`: allows only sampled measurements
-
-All values other than these will result in the default, `"trace_based"`, exemplar filter being used.
-
-[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar
-
-#### Examples
-
-Enable exemplars to be exported.
-
-```console
-export OTEL_GO_X_EXEMPLAR=true
-```
-
-Disable exemplars from being exported.
-
-```console
-unset OTEL_GO_X_EXEMPLAR
-```
-
-Set the exemplar filter to allow all measurements.
-
-```console
-export OTEL_METRICS_EXEMPLAR_FILTER=always_on
-```
-
-Set the exemplar filter to deny all measurements.
-
-```console
-export OTEL_METRICS_EXEMPLAR_FILTER=always_off
-```
-
-Set the exemplar filter to only allow sampled measurements.
-
-```console
-export OTEL_METRICS_EXEMPLAR_FILTER=trace_based
-```
-
-Revert to the default exemplar filter (`"trace_based"`)
-
-```console
-unset OTEL_METRICS_EXEMPLAR_FILTER
-```
-
-### Instrument Enabled
-
-To help users avoid performing computationally expensive operations when recording measurements, synchronous instruments provide an `Enabled` method.
-
-#### Examples
-
-The following code shows an example of how to check if an instrument implements the `EnabledInstrument` interface before using the `Enabled` function to avoid doing an expensive computation:
-
-```go
-type enabledInstrument interface { Enabled(context.Context) bool }
-
-ctr, err := m.Int64Counter("expensive-counter")
-c, ok := ctr.(enabledInstrument)
-if !ok || c.Enabled(context.Background()) {
- c.Add(expensiveComputation())
-}
-```
-
-## Compatibility and Stability
-
-Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
-These features may be removed or modified in successive version releases, including patch versions.
-
-When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
-There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
-If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
deleted file mode 100644
index a98606238..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package x contains support for OTel metric SDK experimental features.
-//
-// This package should only be used for features defined in the specification.
-// It should not be used for experiments or new project ideas.
-package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
-
-import (
- "context"
- "os"
- "strconv"
-)
-
-// CardinalityLimit is an experimental feature flag that defines if
-// cardinality limits should be applied to the recorded metric data-points.
-//
-// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
-// variable to the integer limit value you want to use.
-//
-// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0
-// will disable the cardinality limits.
-var CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
- n, err := strconv.Atoi(v)
- if err != nil {
- return 0, false
- }
- return n, true
-})
-
-// Feature is an experimental feature control flag. It provides a uniform way
-// to interact with these feature flags and parse their values.
-type Feature[T any] struct {
- key string
- parse func(v string) (T, bool)
-}
-
-func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
- const envKeyRoot = "OTEL_GO_X_"
- return Feature[T]{
- key: envKeyRoot + suffix,
- parse: parse,
- }
-}
-
-// Key returns the environment variable key that needs to be set to enable the
-// feature.
-func (f Feature[T]) Key() string { return f.key }
-
-// Lookup returns the user configured value for the feature and true if the
-// user has enabled the feature. Otherwise, if the feature is not enabled, a
-// zero-value and false are returned.
-func (f Feature[T]) Lookup() (v T, ok bool) {
- // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
- //
- // > The SDK MUST interpret an empty value of an environment variable the
- // > same way as when the variable is unset.
- vRaw := os.Getenv(f.key)
- if vRaw == "" {
- return v, ok
- }
- return f.parse(vRaw)
-}
-
-// Enabled returns if the feature is enabled.
-func (f Feature[T]) Enabled() bool {
- _, ok := f.Lookup()
- return ok
-}
-
-// EnabledInstrument informs whether the instrument is enabled.
-//
-// EnabledInstrument interface is implemented by synchronous instruments.
-type EnabledInstrument interface {
- // Enabled returns whether the instrument will process measurements for the given context.
- //
- // This function can be used in places where measuring an instrument
- // would result in computationally expensive operations.
- Enabled(context.Context) bool
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
deleted file mode 100644
index c495985bc..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-// ManualReader is a simple Reader that allows an application to
-// read metrics on demand.
-type ManualReader struct {
- sdkProducer atomic.Value
- shutdownOnce sync.Once
-
- mu sync.Mutex
- isShutdown bool
- externalProducers atomic.Value
-
- temporalitySelector TemporalitySelector
- aggregationSelector AggregationSelector
-}
-
-// Compile time check the manualReader implements Reader and is comparable.
-var _ = map[Reader]struct{}{&ManualReader{}: {}}
-
-// NewManualReader returns a Reader which is directly called to collect metrics.
-func NewManualReader(opts ...ManualReaderOption) *ManualReader {
- cfg := newManualReaderConfig(opts)
- r := &ManualReader{
- temporalitySelector: cfg.temporalitySelector,
- aggregationSelector: cfg.aggregationSelector,
- }
- r.externalProducers.Store(cfg.producers)
- return r
-}
-
-// register stores the sdkProducer which enables the caller
-// to read metrics from the SDK on demand.
-func (mr *ManualReader) register(p sdkProducer) {
- // Only register once. If producer is already set, do nothing.
- if !mr.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
- msg := "did not register manual reader"
- global.Error(errDuplicateRegister, msg)
- }
-}
-
-// temporality reports the Temporality for the instrument kind provided.
-func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality {
- return mr.temporalitySelector(kind)
-}
-
-// aggregation returns what Aggregation to use for kind.
-func (mr *ManualReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type.
- return mr.aggregationSelector(kind)
-}
-
-// Shutdown closes any connections and frees any resources used by the reader.
-//
-// This method is safe to call concurrently.
-func (mr *ManualReader) Shutdown(context.Context) error {
- err := ErrReaderShutdown
- mr.shutdownOnce.Do(func() {
- // Any future call to Collect will now return ErrReaderShutdown.
- mr.sdkProducer.Store(produceHolder{
- produce: shutdownProducer{}.produce,
- })
- mr.mu.Lock()
- defer mr.mu.Unlock()
- mr.isShutdown = true
- // release references to Producer(s)
- mr.externalProducers.Store([]Producer{})
- err = nil
- })
- return err
-}
-
-// Collect gathers all metric data related to the Reader from
-// the SDK and other Producers and stores the result in rm.
-//
-// Collect will return an error if called after shutdown.
-// Collect will return an error if rm is a nil ResourceMetrics.
-// Collect will return an error if the context's Done channel is closed.
-//
-// This method is safe to call concurrently.
-func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
- if rm == nil {
- return errors.New("manual reader: *metricdata.ResourceMetrics is nil")
- }
- p := mr.sdkProducer.Load()
- if p == nil {
- return ErrReaderNotRegistered
- }
-
- ph, ok := p.(produceHolder)
- if !ok {
- // The atomic.Value is entirely in the periodicReader's control so
- // this should never happen. In the unforeseen case that this does
- // happen, return an error instead of panicking so a users code does
- // not halt in the processes.
- err := fmt.Errorf("manual reader: invalid producer: %T", p)
- return err
- }
-
- err := ph.produce(ctx, rm)
- if err != nil {
- return err
- }
- for _, producer := range mr.externalProducers.Load().([]Producer) {
- externalMetrics, e := producer.Produce(ctx)
- if e != nil {
- err = errors.Join(err, e)
- }
- rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
- }
-
- global.Debug("ManualReader collection", "Data", rm)
-
- return err
-}
-
-// MarshalLog returns logging data about the ManualReader.
-func (r *ManualReader) MarshalLog() interface{} {
- r.mu.Lock()
- down := r.isShutdown
- r.mu.Unlock()
- return struct {
- Type string
- Registered bool
- Shutdown bool
- }{
- Type: "ManualReader",
- Registered: r.sdkProducer.Load() != nil,
- Shutdown: down,
- }
-}
-
-// manualReaderConfig contains configuration options for a ManualReader.
-type manualReaderConfig struct {
- temporalitySelector TemporalitySelector
- aggregationSelector AggregationSelector
- producers []Producer
-}
-
-// newManualReaderConfig returns a manualReaderConfig configured with options.
-func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig {
- cfg := manualReaderConfig{
- temporalitySelector: DefaultTemporalitySelector,
- aggregationSelector: DefaultAggregationSelector,
- }
- for _, opt := range opts {
- cfg = opt.applyManual(cfg)
- }
- return cfg
-}
-
-// ManualReaderOption applies a configuration option value to a ManualReader.
-type ManualReaderOption interface {
- applyManual(manualReaderConfig) manualReaderConfig
-}
-
-// WithTemporalitySelector sets the TemporalitySelector a reader will use to
-// determine the Temporality of an instrument based on its kind. If this
-// option is not used, the reader will use the DefaultTemporalitySelector.
-func WithTemporalitySelector(selector TemporalitySelector) ManualReaderOption {
- return temporalitySelectorOption{selector: selector}
-}
-
-type temporalitySelectorOption struct {
- selector func(instrument InstrumentKind) metricdata.Temporality
-}
-
-// applyManual returns a manualReaderConfig with option applied.
-func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig {
- mrc.temporalitySelector = t.selector
- return mrc
-}
-
-// WithAggregationSelector sets the AggregationSelector a reader will use to
-// determine the aggregation to use for an instrument based on its kind. If
-// this option is not used, the reader will use the DefaultAggregationSelector
-// or the aggregation explicitly passed for a view matching an instrument.
-func WithAggregationSelector(selector AggregationSelector) ManualReaderOption {
- return aggregationSelectorOption{selector: selector}
-}
-
-type aggregationSelectorOption struct {
- selector AggregationSelector
-}
-
-// applyManual returns a manualReaderConfig with option applied.
-func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig {
- c.aggregationSelector = t.selector
- return c
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
deleted file mode 100644
index a6ccd117b..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
+++ /dev/null
@@ -1,736 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "context"
- "errors"
- "fmt"
-
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/embedded"
- "go.opentelemetry.io/otel/sdk/instrumentation"
-
- "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
-)
-
-// ErrInstrumentName indicates the created instrument has an invalid name.
-// Valid names must consist of 255 or fewer characters including alphanumeric, _, ., -, / and start with a letter.
-var ErrInstrumentName = errors.New("invalid instrument name")
-
-// meter handles the creation and coordination of all metric instruments. A
-// meter represents a single instrumentation scope; all metric telemetry
-// produced by an instrumentation scope will use metric instruments from a
-// single meter.
-type meter struct {
- embedded.Meter
-
- scope instrumentation.Scope
- pipes pipelines
-
- int64Insts *cacheWithErr[instID, *int64Inst]
- float64Insts *cacheWithErr[instID, *float64Inst]
- int64ObservableInsts *cacheWithErr[instID, int64Observable]
- float64ObservableInsts *cacheWithErr[instID, float64Observable]
-
- int64Resolver resolver[int64]
- float64Resolver resolver[float64]
-}
-
-func newMeter(s instrumentation.Scope, p pipelines) *meter {
- // viewCache ensures instrument conflicts, including number conflicts, this
- // meter is asked to create are logged to the user.
- var viewCache cache[string, instID]
-
- var int64Insts cacheWithErr[instID, *int64Inst]
- var float64Insts cacheWithErr[instID, *float64Inst]
- var int64ObservableInsts cacheWithErr[instID, int64Observable]
- var float64ObservableInsts cacheWithErr[instID, float64Observable]
-
- return &meter{
- scope: s,
- pipes: p,
- int64Insts: &int64Insts,
- float64Insts: &float64Insts,
- int64ObservableInsts: &int64ObservableInsts,
- float64ObservableInsts: &float64ObservableInsts,
- int64Resolver: newResolver[int64](p, &viewCache),
- float64Resolver: newResolver[float64](p, &viewCache),
- }
-}
-
-// Compile-time check meter implements metric.Meter.
-var _ metric.Meter = (*meter)(nil)
-
-// Int64Counter returns a new instrument identified by name and configured with
-// options. The instrument is used to synchronously record increasing int64
-// measurements during a computational operation.
-func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
- cfg := metric.NewInt64CounterConfig(options...)
- const kind = InstrumentKindCounter
- p := int64InstProvider{m}
- i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
- if err != nil {
- return i, err
- }
-
- return i, validateInstrumentName(name)
-}
-
-// Int64UpDownCounter returns a new instrument identified by name and
-// configured with options. The instrument is used to synchronously record
-// int64 measurements during a computational operation.
-func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
- cfg := metric.NewInt64UpDownCounterConfig(options...)
- const kind = InstrumentKindUpDownCounter
- p := int64InstProvider{m}
- i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
- if err != nil {
- return i, err
- }
-
- return i, validateInstrumentName(name)
-}
-
-// Int64Histogram returns a new instrument identified by name and configured
-// with options. The instrument is used to synchronously record the
-// distribution of int64 measurements during a computational operation.
-func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
- cfg := metric.NewInt64HistogramConfig(options...)
- p := int64InstProvider{m}
- i, err := p.lookupHistogram(name, cfg)
- if err != nil {
- return i, err
- }
-
- return i, validateInstrumentName(name)
-}
-
-// Int64Gauge returns a new instrument identified by name and configured
-// with options. The instrument is used to synchronously record the
-// distribution of int64 measurements during a computational operation.
-func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
- cfg := metric.NewInt64GaugeConfig(options...)
- const kind = InstrumentKindGauge
- p := int64InstProvider{m}
- i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
- if err != nil {
- return i, err
- }
-
- return i, validateInstrumentName(name)
-}
-
-// int64ObservableInstrument returns a new observable identified by the Instrument.
-// It registers callbacks for each reader's pipeline.
-func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) {
- key := instID{
- Name: id.Name,
- Description: id.Description,
- Unit: id.Unit,
- Kind: id.Kind,
- }
- if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
- warnRepeatedObservableCallbacks(id)
- }
- return m.int64ObservableInsts.Lookup(key, func() (int64Observable, error) {
- inst := newInt64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
- for _, insert := range m.int64Resolver.inserters {
- // Connect the measure functions for instruments in this pipeline with the
- // callbacks for this pipeline.
- in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
- if err != nil {
- return inst, err
- }
- // Drop aggregation
- if len(in) == 0 {
- inst.dropAggregation = true
- continue
- }
- inst.appendMeasures(in)
-
- // Add the measures to the pipeline. It is required to maintain
- // measures per pipeline to avoid calling the measure that
- // is not part of the pipeline.
- insert.pipeline.addInt64Measure(inst.observableID, in)
- for _, cback := range callbacks {
- inst := int64Observer{measures: in}
- fn := cback
- insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
- }
- }
- return inst, validateInstrumentName(id.Name)
- })
-}
-
-// Int64ObservableCounter returns a new instrument identified by name and
-// configured with options. The instrument is used to asynchronously record
-// increasing int64 measurements once per a measurement collection cycle.
-// Only the measurements recorded during the collection cycle are exported.
-//
-// If Int64ObservableCounter is invoked repeatedly with the same Name,
-// Description, and Unit, only the first set of callbacks provided are used.
-// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
-// if instrumentation can be created multiple times with different callbacks.
-func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
- cfg := metric.NewInt64ObservableCounterConfig(options...)
- id := Instrument{
- Name: name,
- Description: cfg.Description(),
- Unit: cfg.Unit(),
- Kind: InstrumentKindObservableCounter,
- Scope: m.scope,
- }
- return m.int64ObservableInstrument(id, cfg.Callbacks())
-}
-
-// Int64ObservableUpDownCounter returns a new instrument identified by name and
-// configured with options. The instrument is used to asynchronously record
-// int64 measurements once per a measurement collection cycle. Only the
-// measurements recorded during the collection cycle are exported.
-//
-// If Int64ObservableUpDownCounter is invoked repeatedly with the same Name,
-// Description, and Unit, only the first set of callbacks provided are used.
-// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
-// if instrumentation can be created multiple times with different callbacks.
-func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
- cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
- id := Instrument{
- Name: name,
- Description: cfg.Description(),
- Unit: cfg.Unit(),
- Kind: InstrumentKindObservableUpDownCounter,
- Scope: m.scope,
- }
- return m.int64ObservableInstrument(id, cfg.Callbacks())
-}
-
-// Int64ObservableGauge returns a new instrument identified by name and
-// configured with options. The instrument is used to asynchronously record
-// instantaneous int64 measurements once per a measurement collection cycle.
-// Only the measurements recorded during the collection cycle are exported.
-//
-// If Int64ObservableGauge is invoked repeatedly with the same Name,
-// Description, and Unit, only the first set of callbacks provided are used.
-// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
-// if instrumentation can be created multiple times with different callbacks.
-func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
- cfg := metric.NewInt64ObservableGaugeConfig(options...)
- id := Instrument{
- Name: name,
- Description: cfg.Description(),
- Unit: cfg.Unit(),
- Kind: InstrumentKindObservableGauge,
- Scope: m.scope,
- }
- return m.int64ObservableInstrument(id, cfg.Callbacks())
-}
-
-// Float64Counter returns a new instrument identified by name and configured
-// with options. The instrument is used to synchronously record increasing
-// float64 measurements during a computational operation.
-func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
- cfg := metric.NewFloat64CounterConfig(options...)
- const kind = InstrumentKindCounter
- p := float64InstProvider{m}
- i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
- if err != nil {
- return i, err
- }
-
- return i, validateInstrumentName(name)
-}
-
-// Float64UpDownCounter returns a new instrument identified by name and
-// configured with options. The instrument is used to synchronously record
-// float64 measurements during a computational operation.
-func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
- cfg := metric.NewFloat64UpDownCounterConfig(options...)
- const kind = InstrumentKindUpDownCounter
- p := float64InstProvider{m}
- i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
- if err != nil {
- return i, err
- }
-
- return i, validateInstrumentName(name)
-}
-
-// Float64Histogram returns a new instrument identified by name and configured
-// with options. The instrument is used to synchronously record the
-// distribution of float64 measurements during a computational operation.
-func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
- cfg := metric.NewFloat64HistogramConfig(options...)
- p := float64InstProvider{m}
- i, err := p.lookupHistogram(name, cfg)
- if err != nil {
- return i, err
- }
-
- return i, validateInstrumentName(name)
-}
-
-// Float64Gauge returns a new instrument identified by name and configured
-// with options. The instrument is used to synchronously record the
-// distribution of float64 measurements during a computational operation.
-func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
- cfg := metric.NewFloat64GaugeConfig(options...)
- const kind = InstrumentKindGauge
- p := float64InstProvider{m}
- i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
- if err != nil {
- return i, err
- }
-
- return i, validateInstrumentName(name)
-}
-
-// float64ObservableInstrument returns a new observable identified by the Instrument.
-// It registers callbacks for each reader's pipeline.
-func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) {
- key := instID{
- Name: id.Name,
- Description: id.Description,
- Unit: id.Unit,
- Kind: id.Kind,
- }
- if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
- warnRepeatedObservableCallbacks(id)
- }
- return m.float64ObservableInsts.Lookup(key, func() (float64Observable, error) {
- inst := newFloat64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
- for _, insert := range m.float64Resolver.inserters {
- // Connect the measure functions for instruments in this pipeline with the
- // callbacks for this pipeline.
- in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
- if err != nil {
- return inst, err
- }
- // Drop aggregation
- if len(in) == 0 {
- inst.dropAggregation = true
- continue
- }
- inst.appendMeasures(in)
-
- // Add the measures to the pipeline. It is required to maintain
- // measures per pipeline to avoid calling the measure that
- // is not part of the pipeline.
- insert.pipeline.addFloat64Measure(inst.observableID, in)
- for _, cback := range callbacks {
- inst := float64Observer{measures: in}
- fn := cback
- insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
- }
- }
- return inst, validateInstrumentName(id.Name)
- })
-}
-
-// Float64ObservableCounter returns a new instrument identified by name and
-// configured with options. The instrument is used to asynchronously record
-// increasing float64 measurements once per a measurement collection cycle.
-// Only the measurements recorded during the collection cycle are exported.
-//
-// If Float64ObservableCounter is invoked repeatedly with the same Name,
-// Description, and Unit, only the first set of callbacks provided are used.
-// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
-// if instrumentation can be created multiple times with different callbacks.
-func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
- cfg := metric.NewFloat64ObservableCounterConfig(options...)
- id := Instrument{
- Name: name,
- Description: cfg.Description(),
- Unit: cfg.Unit(),
- Kind: InstrumentKindObservableCounter,
- Scope: m.scope,
- }
- return m.float64ObservableInstrument(id, cfg.Callbacks())
-}
-
-// Float64ObservableUpDownCounter returns a new instrument identified by name
-// and configured with options. The instrument is used to asynchronously record
-// float64 measurements once per a measurement collection cycle. Only the
-// measurements recorded during the collection cycle are exported.
-//
-// If Float64ObservableUpDownCounter is invoked repeatedly with the same Name,
-// Description, and Unit, only the first set of callbacks provided are used.
-// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
-// if instrumentation can be created multiple times with different callbacks.
-func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
- cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
- id := Instrument{
- Name: name,
- Description: cfg.Description(),
- Unit: cfg.Unit(),
- Kind: InstrumentKindObservableUpDownCounter,
- Scope: m.scope,
- }
- return m.float64ObservableInstrument(id, cfg.Callbacks())
-}
-
-// Float64ObservableGauge returns a new instrument identified by name and
-// configured with options. The instrument is used to asynchronously record
-// instantaneous float64 measurements once per a measurement collection cycle.
-// Only the measurements recorded during the collection cycle are exported.
-//
-// If Float64ObservableGauge is invoked repeatedly with the same Name,
-// Description, and Unit, only the first set of callbacks provided are used.
-// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
-// if instrumentation can be created multiple times with different callbacks.
-func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
- cfg := metric.NewFloat64ObservableGaugeConfig(options...)
- id := Instrument{
- Name: name,
- Description: cfg.Description(),
- Unit: cfg.Unit(),
- Kind: InstrumentKindObservableGauge,
- Scope: m.scope,
- }
- return m.float64ObservableInstrument(id, cfg.Callbacks())
-}
-
-func validateInstrumentName(name string) error {
- if len(name) == 0 {
- return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name)
- }
- if len(name) > 255 {
- return fmt.Errorf("%w: %s: longer than 255 characters", ErrInstrumentName, name)
- }
- if !isAlpha([]rune(name)[0]) {
- return fmt.Errorf("%w: %s: must start with a letter", ErrInstrumentName, name)
- }
- if len(name) == 1 {
- return nil
- }
- for _, c := range name[1:] {
- if !isAlphanumeric(c) && c != '_' && c != '.' && c != '-' && c != '/' {
- return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName, name)
- }
- }
- return nil
-}
-
-func isAlpha(c rune) bool {
- return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
-}
-
-func isAlphanumeric(c rune) bool {
- return isAlpha(c) || ('0' <= c && c <= '9')
-}
-
-func warnRepeatedObservableCallbacks(id Instrument) {
- inst := fmt.Sprintf(
- "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
- id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
- )
- global.Warn("Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.",
- "instrument", inst,
- )
-}
-
-// RegisterCallback registers f to be called each collection cycle so it will
-// make observations for insts during those cycles.
-//
-// The only instruments f can make observations for are insts. All other
-// observations will be dropped and an error will be logged.
-//
-// Only instruments from this meter can be registered with f, an error is
-// returned if other instrument are provided.
-//
-// Only observations made in the callback will be exported. Unlike synchronous
-// instruments, asynchronous callbacks can "forget" attribute sets that are no
-// longer relevant by omitting the observation during the callback.
-//
-// The returned Registration can be used to unregister f.
-func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
- if len(insts) == 0 {
- // Don't allocate a observer if not needed.
- return noopRegister{}, nil
- }
-
- var err error
- validInstruments := make([]metric.Observable, 0, len(insts))
- for _, inst := range insts {
- switch o := inst.(type) {
- case int64Observable:
- if e := o.registerable(m); e != nil {
- if !errors.Is(e, errEmptyAgg) {
- err = errors.Join(err, e)
- }
- continue
- }
-
- validInstruments = append(validInstruments, inst)
- case float64Observable:
- if e := o.registerable(m); e != nil {
- if !errors.Is(e, errEmptyAgg) {
- err = errors.Join(err, e)
- }
- continue
- }
-
- validInstruments = append(validInstruments, inst)
- default:
- // Instrument external to the SDK.
- return nil, errors.New("invalid observable: from different implementation")
- }
- }
-
- if len(validInstruments) == 0 {
- // All insts use drop aggregation or are invalid.
- return noopRegister{}, err
- }
-
- unregs := make([]func(), len(m.pipes))
- for ix, pipe := range m.pipes {
- reg := newObserver(pipe)
- for _, inst := range validInstruments {
- switch o := inst.(type) {
- case int64Observable:
- reg.registerInt64(o.observableID)
- case float64Observable:
- reg.registerFloat64(o.observableID)
- }
- }
-
- // Some or all instruments were valid.
- cBack := func(ctx context.Context) error { return f(ctx, reg) }
- unregs[ix] = pipe.addMultiCallback(cBack)
- }
-
- return unregisterFuncs{f: unregs}, err
-}
-
-type observer struct {
- embedded.Observer
-
- pipe *pipeline
- float64 map[observableID[float64]]struct{}
- int64 map[observableID[int64]]struct{}
-}
-
-func newObserver(p *pipeline) observer {
- return observer{
- pipe: p,
- float64: make(map[observableID[float64]]struct{}),
- int64: make(map[observableID[int64]]struct{}),
- }
-}
-
-func (r observer) registerFloat64(id observableID[float64]) {
- r.float64[id] = struct{}{}
-}
-
-func (r observer) registerInt64(id observableID[int64]) {
- r.int64[id] = struct{}{}
-}
-
-var (
- errUnknownObserver = errors.New("unknown observable instrument")
- errUnregObserver = errors.New("observable instrument not registered for callback")
-)
-
-func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...metric.ObserveOption) {
- var oImpl float64Observable
- switch conv := o.(type) {
- case float64Observable:
- oImpl = conv
- default:
- global.Error(errUnknownObserver, "failed to record")
- return
- }
-
- if _, registered := r.float64[oImpl.observableID]; !registered {
- if !oImpl.dropAggregation {
- global.Error(errUnregObserver, "failed to record",
- "name", oImpl.name,
- "description", oImpl.description,
- "unit", oImpl.unit,
- "number", fmt.Sprintf("%T", float64(0)),
- )
- }
- return
- }
- c := metric.NewObserveConfig(opts)
- // Access to r.pipe.float64Measure is already guarded by a lock in pipeline.produce.
- // TODO (#5946): Refactor pipeline and observable measures.
- measures := r.pipe.float64Measures[oImpl.observableID]
- for _, m := range measures {
- m(context.Background(), v, c.Attributes())
- }
-}
-
-func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) {
- var oImpl int64Observable
- switch conv := o.(type) {
- case int64Observable:
- oImpl = conv
- default:
- global.Error(errUnknownObserver, "failed to record")
- return
- }
-
- if _, registered := r.int64[oImpl.observableID]; !registered {
- if !oImpl.dropAggregation {
- global.Error(errUnregObserver, "failed to record",
- "name", oImpl.name,
- "description", oImpl.description,
- "unit", oImpl.unit,
- "number", fmt.Sprintf("%T", int64(0)),
- )
- }
- return
- }
- c := metric.NewObserveConfig(opts)
- // Access to r.pipe.int64Measures is already guarded b a lock in pipeline.produce.
- // TODO (#5946): Refactor pipeline and observable measures.
- measures := r.pipe.int64Measures[oImpl.observableID]
- for _, m := range measures {
- m(context.Background(), v, c.Attributes())
- }
-}
-
-type noopRegister struct{ embedded.Registration }
-
-func (noopRegister) Unregister() error {
- return nil
-}
-
-// int64InstProvider provides int64 OpenTelemetry instruments.
-type int64InstProvider struct{ *meter }
-
-func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[int64], error) {
- inst := Instrument{
- Name: name,
- Description: desc,
- Unit: u,
- Kind: kind,
- Scope: p.scope,
- }
- return p.int64Resolver.Aggregators(inst)
-}
-
-func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramConfig) ([]aggregate.Measure[int64], error) {
- boundaries := cfg.ExplicitBucketBoundaries()
- aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
- if aggError != nil {
- // If boundaries are invalid, ignore them.
- boundaries = nil
- }
- inst := Instrument{
- Name: name,
- Description: cfg.Description(),
- Unit: cfg.Unit(),
- Kind: InstrumentKindHistogram,
- Scope: p.scope,
- }
- measures, err := p.int64Resolver.HistogramAggregators(inst, boundaries)
- return measures, errors.Join(aggError, err)
-}
-
-// lookup returns the resolved instrumentImpl.
-func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) {
- return p.meter.int64Insts.Lookup(instID{
- Name: name,
- Description: desc,
- Unit: u,
- Kind: kind,
- }, func() (*int64Inst, error) {
- aggs, err := p.aggs(kind, name, desc, u)
- return &int64Inst{measures: aggs}, err
- })
-}
-
-// lookupHistogram returns the resolved instrumentImpl.
-func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) {
- return p.meter.int64Insts.Lookup(instID{
- Name: name,
- Description: cfg.Description(),
- Unit: cfg.Unit(),
- Kind: InstrumentKindHistogram,
- }, func() (*int64Inst, error) {
- aggs, err := p.histogramAggs(name, cfg)
- return &int64Inst{measures: aggs}, err
- })
-}
-
-// float64InstProvider provides float64 OpenTelemetry instruments.
-type float64InstProvider struct{ *meter }
-
-func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[float64], error) {
- inst := Instrument{
- Name: name,
- Description: desc,
- Unit: u,
- Kind: kind,
- Scope: p.scope,
- }
- return p.float64Resolver.Aggregators(inst)
-}
-
-func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64HistogramConfig) ([]aggregate.Measure[float64], error) {
- boundaries := cfg.ExplicitBucketBoundaries()
- aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
- if aggError != nil {
- // If boundaries are invalid, ignore them.
- boundaries = nil
- }
- inst := Instrument{
- Name: name,
- Description: cfg.Description(),
- Unit: cfg.Unit(),
- Kind: InstrumentKindHistogram,
- Scope: p.scope,
- }
- measures, err := p.float64Resolver.HistogramAggregators(inst, boundaries)
- return measures, errors.Join(aggError, err)
-}
-
-// lookup returns the resolved instrumentImpl.
-func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) {
- return p.meter.float64Insts.Lookup(instID{
- Name: name,
- Description: desc,
- Unit: u,
- Kind: kind,
- }, func() (*float64Inst, error) {
- aggs, err := p.aggs(kind, name, desc, u)
- return &float64Inst{measures: aggs}, err
- })
-}
-
-// lookupHistogram returns the resolved instrumentImpl.
-func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) {
- return p.meter.float64Insts.Lookup(instID{
- Name: name,
- Description: cfg.Description(),
- Unit: cfg.Unit(),
- Kind: InstrumentKindHistogram,
- }, func() (*float64Inst, error) {
- aggs, err := p.histogramAggs(name, cfg)
- return &float64Inst{measures: aggs}, err
- })
-}
-
-type int64Observer struct {
- embedded.Int64Observer
- measures[int64]
-}
-
-func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) {
- c := metric.NewObserveConfig(opts)
- o.observe(val, c.Attributes())
-}
-
-type float64Observer struct {
- embedded.Float64Observer
- measures[float64]
-}
-
-func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) {
- c := metric.NewObserveConfig(opts)
- o.observe(val, c.Attributes())
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md
deleted file mode 100644
index d1390df1b..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# SDK Metric data
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric/metricdata)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/metricdata)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
deleted file mode 100644
index d32cfc67d..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
+++ /dev/null
@@ -1,296 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
-
-import (
- "encoding/json"
- "time"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/instrumentation"
- "go.opentelemetry.io/otel/sdk/resource"
-)
-
-// ResourceMetrics is a collection of ScopeMetrics and the associated Resource
-// that created them.
-type ResourceMetrics struct {
- // Resource represents the entity that collected the metrics.
- Resource *resource.Resource
- // ScopeMetrics are the collection of metrics with unique Scopes.
- ScopeMetrics []ScopeMetrics
-}
-
-// ScopeMetrics is a collection of Metrics Produces by a Meter.
-type ScopeMetrics struct {
- // Scope is the Scope that the Meter was created with.
- Scope instrumentation.Scope
- // Metrics are a list of aggregations created by the Meter.
- Metrics []Metrics
-}
-
-// Metrics is a collection of one or more aggregated timeseries from an Instrument.
-type Metrics struct {
- // Name is the name of the Instrument that created this data.
- Name string
- // Description is the description of the Instrument, which can be used in documentation.
- Description string
- // Unit is the unit in which the Instrument reports.
- Unit string
- // Data is the aggregated data from an Instrument.
- Data Aggregation
-}
-
-// Aggregation is the store of data reported by an Instrument.
-// It will be one of: Gauge, Sum, Histogram.
-type Aggregation interface {
- privateAggregation()
-}
-
-// Gauge represents a measurement of the current value of an instrument.
-type Gauge[N int64 | float64] struct {
- // DataPoints are the individual aggregated measurements with unique
- // Attributes.
- DataPoints []DataPoint[N]
-}
-
-func (Gauge[N]) privateAggregation() {}
-
-// Sum represents the sum of all measurements of values from an instrument.
-type Sum[N int64 | float64] struct {
- // DataPoints are the individual aggregated measurements with unique
- // Attributes.
- DataPoints []DataPoint[N]
- // Temporality describes if the aggregation is reported as the change from the
- // last report time, or the cumulative changes since a fixed start time.
- Temporality Temporality
- // IsMonotonic represents if this aggregation only increases or decreases.
- IsMonotonic bool
-}
-
-func (Sum[N]) privateAggregation() {}
-
-// DataPoint is a single data point in a timeseries.
-type DataPoint[N int64 | float64] struct {
- // Attributes is the set of key value pairs that uniquely identify the
- // timeseries.
- Attributes attribute.Set
- // StartTime is when the timeseries was started. (optional)
- StartTime time.Time `json:",omitempty"`
- // Time is the time when the timeseries was recorded. (optional)
- Time time.Time `json:",omitempty"`
- // Value is the value of this data point.
- Value N
-
- // Exemplars is the sampled Exemplars collected during the timeseries.
- Exemplars []Exemplar[N] `json:",omitempty"`
-}
-
-// Histogram represents the histogram of all measurements of values from an instrument.
-type Histogram[N int64 | float64] struct {
- // DataPoints are the individual aggregated measurements with unique
- // Attributes.
- DataPoints []HistogramDataPoint[N]
- // Temporality describes if the aggregation is reported as the change from the
- // last report time, or the cumulative changes since a fixed start time.
- Temporality Temporality
-}
-
-func (Histogram[N]) privateAggregation() {}
-
-// HistogramDataPoint is a single histogram data point in a timeseries.
-type HistogramDataPoint[N int64 | float64] struct {
- // Attributes is the set of key value pairs that uniquely identify the
- // timeseries.
- Attributes attribute.Set
- // StartTime is when the timeseries was started.
- StartTime time.Time
- // Time is the time when the timeseries was recorded.
- Time time.Time
-
- // Count is the number of updates this histogram has been calculated with.
- Count uint64
- // Bounds are the upper bounds of the buckets of the histogram. Because the
- // last boundary is +infinity this one is implied.
- Bounds []float64
- // BucketCounts is the count of each of the buckets.
- BucketCounts []uint64
-
- // Min is the minimum value recorded. (optional)
- Min Extrema[N]
- // Max is the maximum value recorded. (optional)
- Max Extrema[N]
- // Sum is the sum of the values recorded.
- Sum N
-
- // Exemplars is the sampled Exemplars collected during the timeseries.
- Exemplars []Exemplar[N] `json:",omitempty"`
-}
-
-// ExponentialHistogram represents the histogram of all measurements of values from an instrument.
-type ExponentialHistogram[N int64 | float64] struct {
- // DataPoints are the individual aggregated measurements with unique
- // attributes.
- DataPoints []ExponentialHistogramDataPoint[N]
- // Temporality describes if the aggregation is reported as the change from the
- // last report time, or the cumulative changes since a fixed start time.
- Temporality Temporality
-}
-
-func (ExponentialHistogram[N]) privateAggregation() {}
-
-// ExponentialHistogramDataPoint is a single exponential histogram data point in a timeseries.
-type ExponentialHistogramDataPoint[N int64 | float64] struct {
- // Attributes is the set of key value pairs that uniquely identify the
- // timeseries.
- Attributes attribute.Set
- // StartTime is when the timeseries was started.
- StartTime time.Time
- // Time is the time when the timeseries was recorded.
- Time time.Time
-
- // Count is the number of updates this histogram has been calculated with.
- Count uint64
- // Min is the minimum value recorded. (optional)
- Min Extrema[N]
- // Max is the maximum value recorded. (optional)
- Max Extrema[N]
- // Sum is the sum of the values recorded.
- Sum N
-
- // Scale describes the resolution of the histogram. Boundaries are
- // located at powers of the base, where:
- //
- // base = 2 ^ (2 ^ -Scale)
- Scale int32
- // ZeroCount is the number of values whose absolute value
- // is less than or equal to [ZeroThreshold].
- // When ZeroThreshold is 0, this is the number of values that
- // cannot be expressed using the standard exponential formula
- // as well as values that have been rounded to zero.
- // ZeroCount represents the special zero count bucket.
- ZeroCount uint64
-
- // PositiveBucket is range of positive value bucket counts.
- PositiveBucket ExponentialBucket
- // NegativeBucket is range of negative value bucket counts.
- NegativeBucket ExponentialBucket
-
- // ZeroThreshold is the width of the zero region. Where the zero region is
- // defined as the closed interval [-ZeroThreshold, ZeroThreshold].
- ZeroThreshold float64
-
- // Exemplars is the sampled Exemplars collected during the timeseries.
- Exemplars []Exemplar[N] `json:",omitempty"`
-}
-
-// ExponentialBucket are a set of bucket counts, encoded in a contiguous array
-// of counts.
-type ExponentialBucket struct {
- // Offset is the bucket index of the first entry in the Counts slice.
- Offset int32
- // Counts is an slice where Counts[i] carries the count of the bucket at
- // index (Offset+i). Counts[i] is the count of values greater than
- // base^(Offset+i) and less than or equal to base^(Offset+i+1).
- Counts []uint64
-}
-
-// Extrema is the minimum or maximum value of a dataset.
-type Extrema[N int64 | float64] struct {
- value N
- valid bool
-}
-
-// MarshalText converts the Extrema value to text.
-func (e Extrema[N]) MarshalText() ([]byte, error) {
- if !e.valid {
- return json.Marshal(nil)
- }
- return json.Marshal(e.value)
-}
-
-// MarshalJSON converts the Extrema value to JSON number.
-func (e *Extrema[N]) MarshalJSON() ([]byte, error) {
- return e.MarshalText()
-}
-
-// NewExtrema returns an Extrema set to v.
-func NewExtrema[N int64 | float64](v N) Extrema[N] {
- return Extrema[N]{value: v, valid: true}
-}
-
-// Value returns the Extrema value and true if the Extrema is defined.
-// Otherwise, if the Extrema is its zero-value, defined will be false.
-func (e Extrema[N]) Value() (v N, defined bool) {
- return e.value, e.valid
-}
-
-// Exemplar is a measurement sampled from a timeseries providing a typical
-// example.
-type Exemplar[N int64 | float64] struct {
- // FilteredAttributes are the attributes recorded with the measurement but
- // filtered out of the timeseries' aggregated data.
- FilteredAttributes []attribute.KeyValue
- // Time is the time when the measurement was recorded.
- Time time.Time
- // Value is the measured value.
- Value N
- // SpanID is the ID of the span that was active during the measurement. If
- // no span was active or the span was not sampled this will be empty.
- SpanID []byte `json:",omitempty"`
- // TraceID is the ID of the trace the active span belonged to during the
- // measurement. If no span was active or the span was not sampled this will
- // be empty.
- TraceID []byte `json:",omitempty"`
-}
-
-// Summary metric data are used to convey quantile summaries,
-// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
-// data type.
-//
-// These data points cannot always be merged in a meaningful way. The Summary
-// type is only used by bridges from other metrics libraries, and cannot be
-// produced using OpenTelemetry instrumentation.
-type Summary struct {
- // DataPoints are the individual aggregated measurements with unique
- // attributes.
- DataPoints []SummaryDataPoint
-}
-
-func (Summary) privateAggregation() {}
-
-// SummaryDataPoint is a single data point in a timeseries that describes the
-// time-varying values of a Summary metric.
-type SummaryDataPoint struct {
- // Attributes is the set of key value pairs that uniquely identify the
- // timeseries.
- Attributes attribute.Set
-
- // StartTime is when the timeseries was started.
- StartTime time.Time
- // Time is the time when the timeseries was recorded.
- Time time.Time
-
- // Count is the number of updates this summary has been calculated with.
- Count uint64
-
- // Sum is the sum of the values recorded.
- Sum float64
-
- // (Optional) list of values at different quantiles of the distribution calculated
- // from the current snapshot. The quantiles must be strictly increasing.
- QuantileValues []QuantileValue
-}
-
-// QuantileValue is the value at a given quantile of a summary.
-type QuantileValue struct {
- // Quantile is the quantile of this value.
- //
- // Must be in the interval [0.0, 1.0].
- Quantile float64
-
- // Value is the value at the given quantile of a summary.
- //
- // Quantile values must NOT be negative.
- Value float64
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
deleted file mode 100644
index 187713dad..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:generate stringer -type=Temporality
-
-package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
-
-// Temporality defines the window that an aggregation was calculated over.
-type Temporality uint8
-
-const (
- // undefinedTemporality represents an unset Temporality.
- //nolint:deadcode,unused,varcheck
- undefinedTemporality Temporality = iota
-
- // CumulativeTemporality defines a measurement interval that continues to
- // expand forward in time from a starting point. New measurements are
- // added to all previous measurements since a start time.
- CumulativeTemporality
-
- // DeltaTemporality defines a measurement interval that resets each cycle.
- // Measurements from one cycle are recorded independently, measurements
- // from other cycles do not affect them.
- DeltaTemporality
-)
-
-// MarshalText returns the byte encoded of t.
-func (t Temporality) MarshalText() ([]byte, error) {
- return []byte(t.String()), nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
deleted file mode 100644
index 4da833cdc..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Code generated by "stringer -type=Temporality"; DO NOT EDIT.
-
-package metricdata
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[undefinedTemporality-0]
- _ = x[CumulativeTemporality-1]
- _ = x[DeltaTemporality-2]
-}
-
-const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality"
-
-var _Temporality_index = [...]uint8{0, 20, 41, 57}
-
-func (i Temporality) String() string {
- if i >= Temporality(len(_Temporality_index)-1) {
- return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]]
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
deleted file mode 100644
index dcd2182d9..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
+++ /dev/null
@@ -1,369 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-// Default periodic reader timing.
-const (
- defaultTimeout = time.Millisecond * 30000
- defaultInterval = time.Millisecond * 60000
-)
-
-// periodicReaderConfig contains configuration options for a PeriodicReader.
-type periodicReaderConfig struct {
- interval time.Duration
- timeout time.Duration
- producers []Producer
-}
-
-// newPeriodicReaderConfig returns a periodicReaderConfig configured with
-// options.
-func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig {
- c := periodicReaderConfig{
- interval: envDuration(envInterval, defaultInterval),
- timeout: envDuration(envTimeout, defaultTimeout),
- }
- for _, o := range options {
- c = o.applyPeriodic(c)
- }
- return c
-}
-
-// PeriodicReaderOption applies a configuration option value to a PeriodicReader.
-type PeriodicReaderOption interface {
- applyPeriodic(periodicReaderConfig) periodicReaderConfig
-}
-
-// periodicReaderOptionFunc applies a set of options to a periodicReaderConfig.
-type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig
-
-// applyPeriodic returns a periodicReaderConfig with option(s) applied.
-func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig {
- return o(conf)
-}
-
-// WithTimeout configures the time a PeriodicReader waits for an export to
-// complete before canceling it. This includes an export which occurs as part
-// of Shutdown or ForceFlush if the user passed context does not have a
-// deadline. If the user passed context does have a deadline, it will be used
-// instead.
-//
-// This option overrides any value set for the
-// OTEL_METRIC_EXPORT_TIMEOUT environment variable.
-//
-// If this option is not used or d is less than or equal to zero, 30 seconds
-// is used as the default.
-func WithTimeout(d time.Duration) PeriodicReaderOption {
- return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
- if d <= 0 {
- return conf
- }
- conf.timeout = d
- return conf
- })
-}
-
-// WithInterval configures the intervening time between exports for a
-// PeriodicReader.
-//
-// This option overrides any value set for the
-// OTEL_METRIC_EXPORT_INTERVAL environment variable.
-//
-// If this option is not used or d is less than or equal to zero, 60 seconds
-// is used as the default.
-func WithInterval(d time.Duration) PeriodicReaderOption {
- return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
- if d <= 0 {
- return conf
- }
- conf.interval = d
- return conf
- })
-}
-
-// NewPeriodicReader returns a Reader that collects and exports metric data to
-// the exporter at a defined interval. By default, the returned Reader will
-// collect and export data every 60 seconds, and will cancel any attempts that
-// exceed 30 seconds, collect and export combined. The collect and export time
-// are not counted towards the interval between attempts.
-//
-// The Collect method of the returned Reader continues to gather and return
-// metric data to the user. It will not automatically send that data to the
-// exporter. That is left to the user to accomplish.
-func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader {
- conf := newPeriodicReaderConfig(options)
- ctx, cancel := context.WithCancel(context.Background())
- r := &PeriodicReader{
- interval: conf.interval,
- timeout: conf.timeout,
- exporter: exporter,
- flushCh: make(chan chan error),
- cancel: cancel,
- done: make(chan struct{}),
- rmPool: sync.Pool{
- New: func() interface{} {
- return &metricdata.ResourceMetrics{}
- },
- },
- }
- r.externalProducers.Store(conf.producers)
-
- go func() {
- defer func() { close(r.done) }()
- r.run(ctx, conf.interval)
- }()
-
- return r
-}
-
-// PeriodicReader is a Reader that continuously collects and exports metric
-// data at a set interval.
-type PeriodicReader struct {
- sdkProducer atomic.Value
-
- mu sync.Mutex
- isShutdown bool
- externalProducers atomic.Value
-
- interval time.Duration
- timeout time.Duration
- exporter Exporter
- flushCh chan chan error
-
- done chan struct{}
- cancel context.CancelFunc
- shutdownOnce sync.Once
-
- rmPool sync.Pool
-}
-
-// Compile time check the periodicReader implements Reader and is comparable.
-var _ = map[Reader]struct{}{&PeriodicReader{}: {}}
-
-// newTicker allows testing override.
-var newTicker = time.NewTicker
-
-// run continuously collects and exports metric data at the specified
-// interval. This will run until ctx is canceled or times out.
-func (r *PeriodicReader) run(ctx context.Context, interval time.Duration) {
- ticker := newTicker(interval)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- err := r.collectAndExport(ctx)
- if err != nil {
- otel.Handle(err)
- }
- case errCh := <-r.flushCh:
- errCh <- r.collectAndExport(ctx)
- ticker.Reset(interval)
- case <-ctx.Done():
- return
- }
- }
-}
-
-// register registers p as the producer of this reader.
-func (r *PeriodicReader) register(p sdkProducer) {
- // Only register once. If producer is already set, do nothing.
- if !r.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
- msg := "did not register periodic reader"
- global.Error(errDuplicateRegister, msg)
- }
-}
-
-// temporality reports the Temporality for the instrument kind provided.
-func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality {
- return r.exporter.Temporality(kind)
-}
-
-// aggregation returns what Aggregation to use for kind.
-func (r *PeriodicReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type.
- return r.exporter.Aggregation(kind)
-}
-
-// collectAndExport gather all metric data related to the periodicReader r from
-// the SDK and exports it with r's exporter.
-func (r *PeriodicReader) collectAndExport(ctx context.Context) error {
- ctx, cancel := context.WithTimeout(ctx, r.timeout)
- defer cancel()
-
- // TODO (#3047): Use a sync.Pool or persistent pointer instead of allocating rm every Collect.
- rm := r.rmPool.Get().(*metricdata.ResourceMetrics)
- err := r.Collect(ctx, rm)
- if err == nil {
- err = r.export(ctx, rm)
- }
- r.rmPool.Put(rm)
- return err
-}
-
-// Collect gathers all metric data related to the Reader from
-// the SDK and other Producers and stores the result in rm. The metric
-// data is not exported to the configured exporter, it is left to the caller to
-// handle that if desired.
-//
-// Collect will return an error if called after shutdown.
-// Collect will return an error if rm is a nil ResourceMetrics.
-// Collect will return an error if the context's Done channel is closed.
-//
-// This method is safe to call concurrently.
-func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
- if rm == nil {
- return errors.New("periodic reader: *metricdata.ResourceMetrics is nil")
- }
- // TODO (#3047): When collect is updated to accept output as param, pass rm.
- return r.collect(ctx, r.sdkProducer.Load(), rm)
-}
-
-// collect unwraps p as a produceHolder and returns its produce results.
-func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error {
- if p == nil {
- return ErrReaderNotRegistered
- }
-
- ph, ok := p.(produceHolder)
- if !ok {
- // The atomic.Value is entirely in the periodicReader's control so
- // this should never happen. In the unforeseen case that this does
- // happen, return an error instead of panicking so a users code does
- // not halt in the processes.
- err := fmt.Errorf("periodic reader: invalid producer: %T", p)
- return err
- }
-
- err := ph.produce(ctx, rm)
- if err != nil {
- return err
- }
- for _, producer := range r.externalProducers.Load().([]Producer) {
- externalMetrics, e := producer.Produce(ctx)
- if e != nil {
- err = errors.Join(err, e)
- }
- rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
- }
-
- global.Debug("PeriodicReader collection", "Data", rm)
-
- return err
-}
-
-// export exports metric data m using r's exporter.
-func (r *PeriodicReader) export(ctx context.Context, m *metricdata.ResourceMetrics) error {
- return r.exporter.Export(ctx, m)
-}
-
-// ForceFlush flushes pending telemetry.
-//
-// This method is safe to call concurrently.
-func (r *PeriodicReader) ForceFlush(ctx context.Context) error {
- // Prioritize the ctx timeout if it is set.
- if _, ok := ctx.Deadline(); !ok {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, r.timeout)
- defer cancel()
- }
-
- errCh := make(chan error, 1)
- select {
- case r.flushCh <- errCh:
- select {
- case err := <-errCh:
- if err != nil {
- return err
- }
- close(errCh)
- case <-ctx.Done():
- return ctx.Err()
- }
- case <-r.done:
- return ErrReaderShutdown
- case <-ctx.Done():
- return ctx.Err()
- }
- return r.exporter.ForceFlush(ctx)
-}
-
-// Shutdown flushes pending telemetry and then stops the export pipeline.
-//
-// This method is safe to call concurrently.
-func (r *PeriodicReader) Shutdown(ctx context.Context) error {
- err := ErrReaderShutdown
- r.shutdownOnce.Do(func() {
- // Prioritize the ctx timeout if it is set.
- if _, ok := ctx.Deadline(); !ok {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, r.timeout)
- defer cancel()
- }
-
- // Stop the run loop.
- r.cancel()
- <-r.done
-
- // Any future call to Collect will now return ErrReaderShutdown.
- ph := r.sdkProducer.Swap(produceHolder{
- produce: shutdownProducer{}.produce,
- })
-
- if ph != nil { // Reader was registered.
- // Flush pending telemetry.
- m := r.rmPool.Get().(*metricdata.ResourceMetrics)
- err = r.collect(ctx, ph, m)
- if err == nil {
- err = r.export(ctx, m)
- }
- r.rmPool.Put(m)
- }
-
- sErr := r.exporter.Shutdown(ctx)
- if err == nil || errors.Is(err, ErrReaderShutdown) {
- err = sErr
- }
-
- r.mu.Lock()
- defer r.mu.Unlock()
- r.isShutdown = true
- // release references to Producer(s)
- r.externalProducers.Store([]Producer{})
- })
- return err
-}
-
-// MarshalLog returns logging data about the PeriodicReader.
-func (r *PeriodicReader) MarshalLog() interface{} {
- r.mu.Lock()
- down := r.isShutdown
- r.mu.Unlock()
- return struct {
- Type string
- Exporter Exporter
- Registered bool
- Shutdown bool
- Interval time.Duration
- Timeout time.Duration
- }{
- Type: "PeriodicReader",
- Exporter: r.exporter,
- Registered: r.sdkProducer.Load() != nil,
- Shutdown: down,
- Interval: r.interval,
- Timeout: r.timeout,
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
deleted file mode 100644
index 775e24526..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
+++ /dev/null
@@ -1,659 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "container/list"
- "context"
- "errors"
- "fmt"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/metric/embedded"
- "go.opentelemetry.io/otel/sdk/instrumentation"
- "go.opentelemetry.io/otel/sdk/metric/exemplar"
- "go.opentelemetry.io/otel/sdk/metric/internal"
- "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
- "go.opentelemetry.io/otel/sdk/metric/internal/x"
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
- "go.opentelemetry.io/otel/sdk/resource"
-)
-
-var (
- errCreatingAggregators = errors.New("could not create all aggregators")
- errIncompatibleAggregation = errors.New("incompatible aggregation")
- errUnknownAggregation = errors.New("unrecognized aggregation")
-)
-
-// instrumentSync is a synchronization point between a pipeline and an
-// instrument's aggregate function.
-type instrumentSync struct {
- name string
- description string
- unit string
- compAgg aggregate.ComputeAggregation
-}
-
-func newPipeline(res *resource.Resource, reader Reader, views []View, exemplarFilter exemplar.Filter) *pipeline {
- if res == nil {
- res = resource.Empty()
- }
- return &pipeline{
- resource: res,
- reader: reader,
- views: views,
- int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{},
- float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{},
- exemplarFilter: exemplarFilter,
- // aggregations is lazy allocated when needed.
- }
-}
-
-// pipeline connects all of the instruments created by a meter provider to a Reader.
-// This is the object that will be `Reader.register()` when a meter provider is created.
-//
-// As instruments are created the instrument should be checked if it exists in
-// the views of a the Reader, and if so each aggregate function should be added
-// to the pipeline.
-type pipeline struct {
- resource *resource.Resource
-
- reader Reader
- views []View
-
- sync.Mutex
- int64Measures map[observableID[int64]][]aggregate.Measure[int64]
- float64Measures map[observableID[float64]][]aggregate.Measure[float64]
- aggregations map[instrumentation.Scope][]instrumentSync
- callbacks []func(context.Context) error
- multiCallbacks list.List
- exemplarFilter exemplar.Filter
-}
-
-// addInt64Measure adds a new int64 measure to the pipeline for each observer.
-func (p *pipeline) addInt64Measure(id observableID[int64], m []aggregate.Measure[int64]) {
- p.Lock()
- defer p.Unlock()
- p.int64Measures[id] = m
-}
-
-// addFloat64Measure adds a new float64 measure to the pipeline for each observer.
-func (p *pipeline) addFloat64Measure(id observableID[float64], m []aggregate.Measure[float64]) {
- p.Lock()
- defer p.Unlock()
- p.float64Measures[id] = m
-}
-
-// addSync adds the instrumentSync to pipeline p with scope. This method is not
-// idempotent. Duplicate calls will result in duplicate additions, it is the
-// callers responsibility to ensure this is called with unique values.
-func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) {
- p.Lock()
- defer p.Unlock()
- if p.aggregations == nil {
- p.aggregations = map[instrumentation.Scope][]instrumentSync{
- scope: {iSync},
- }
- return
- }
- p.aggregations[scope] = append(p.aggregations[scope], iSync)
-}
-
-type multiCallback func(context.Context) error
-
-// addMultiCallback registers a multi-instrument callback to be run when
-// `produce()` is called.
-func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) {
- p.Lock()
- defer p.Unlock()
- e := p.multiCallbacks.PushBack(c)
- return func() {
- p.Lock()
- p.multiCallbacks.Remove(e)
- p.Unlock()
- }
-}
-
-// produce returns aggregated metrics from a single collection.
-//
-// This method is safe to call concurrently.
-func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error {
- p.Lock()
- defer p.Unlock()
-
- var err error
- for _, c := range p.callbacks {
- // TODO make the callbacks parallel. ( #3034 )
- if e := c(ctx); e != nil {
- err = errors.Join(err, e)
- }
- if err := ctx.Err(); err != nil {
- rm.Resource = nil
- clear(rm.ScopeMetrics) // Erase elements to let GC collect objects.
- rm.ScopeMetrics = rm.ScopeMetrics[:0]
- return err
- }
- }
- for e := p.multiCallbacks.Front(); e != nil; e = e.Next() {
- // TODO make the callbacks parallel. ( #3034 )
- f := e.Value.(multiCallback)
- if e := f(ctx); e != nil {
- err = errors.Join(err, e)
- }
- if err := ctx.Err(); err != nil {
- // This means the context expired before we finished running callbacks.
- rm.Resource = nil
- clear(rm.ScopeMetrics) // Erase elements to let GC collect objects.
- rm.ScopeMetrics = rm.ScopeMetrics[:0]
- return err
- }
- }
-
- rm.Resource = p.resource
- rm.ScopeMetrics = internal.ReuseSlice(rm.ScopeMetrics, len(p.aggregations))
-
- i := 0
- for scope, instruments := range p.aggregations {
- rm.ScopeMetrics[i].Metrics = internal.ReuseSlice(rm.ScopeMetrics[i].Metrics, len(instruments))
- j := 0
- for _, inst := range instruments {
- data := rm.ScopeMetrics[i].Metrics[j].Data
- if n := inst.compAgg(&data); n > 0 {
- rm.ScopeMetrics[i].Metrics[j].Name = inst.name
- rm.ScopeMetrics[i].Metrics[j].Description = inst.description
- rm.ScopeMetrics[i].Metrics[j].Unit = inst.unit
- rm.ScopeMetrics[i].Metrics[j].Data = data
- j++
- }
- }
- rm.ScopeMetrics[i].Metrics = rm.ScopeMetrics[i].Metrics[:j]
- if len(rm.ScopeMetrics[i].Metrics) > 0 {
- rm.ScopeMetrics[i].Scope = scope
- i++
- }
- }
-
- rm.ScopeMetrics = rm.ScopeMetrics[:i]
-
- return err
-}
-
-// inserter facilitates inserting of new instruments from a single scope into a
-// pipeline.
-type inserter[N int64 | float64] struct {
- // aggregators is a cache that holds aggregate function inputs whose
- // outputs have been inserted into the underlying reader pipeline. This
- // cache ensures no duplicate aggregate functions are inserted into the
- // reader pipeline and if a new request during an instrument creation asks
- // for the same aggregate function input the same instance is returned.
- aggregators *cache[instID, aggVal[N]]
-
- // views is a cache that holds instrument identifiers for all the
- // instruments a Meter has created, it is provided from the Meter that owns
- // this inserter. This cache ensures during the creation of instruments
- // with the same name but different options (e.g. description, unit) a
- // warning message is logged.
- views *cache[string, instID]
-
- pipeline *pipeline
-}
-
-func newInserter[N int64 | float64](p *pipeline, vc *cache[string, instID]) *inserter[N] {
- if vc == nil {
- vc = &cache[string, instID]{}
- }
- return &inserter[N]{
- aggregators: &cache[instID, aggVal[N]]{},
- views: vc,
- pipeline: p,
- }
-}
-
-// Instrument inserts the instrument inst with instUnit into a pipeline. All
-// views the pipeline contains are matched against, and any matching view that
-// creates a unique aggregate function will have its output inserted into the
-// pipeline and its input included in the returned slice.
-//
-// The returned aggregate function inputs are ensured to be deduplicated and
-// unique. If another view in another pipeline that is cached by this
-// inserter's cache has already inserted the same aggregate function for the
-// same instrument, that functions input instance is returned.
-//
-// If another instrument has already been inserted by this inserter, or any
-// other using the same cache, and it conflicts with the instrument being
-// inserted in this call, an aggregate function input matching the arguments
-// will still be returned but an Info level log message will also be logged to
-// the OTel global logger.
-//
-// If the passed instrument would result in an incompatible aggregate function,
-// an error is returned and that aggregate function output is not inserted nor
-// is its input returned.
-//
-// If an instrument is determined to use a Drop aggregation, that instrument is
-// not inserted nor returned.
-func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) ([]aggregate.Measure[N], error) {
- var (
- matched bool
- measures []aggregate.Measure[N]
- )
-
- var err error
- seen := make(map[uint64]struct{})
- for _, v := range i.pipeline.views {
- stream, match := v(inst)
- if !match {
- continue
- }
- matched = true
- in, id, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
- if e != nil {
- err = errors.Join(err, e)
- }
- if in == nil { // Drop aggregation.
- continue
- }
- if _, ok := seen[id]; ok {
- // This aggregate function has already been added.
- continue
- }
- seen[id] = struct{}{}
- measures = append(measures, in)
- }
-
- if err != nil {
- err = errors.Join(errCreatingAggregators, err)
- }
-
- if matched {
- return measures, err
- }
-
- // Apply implicit default view if no explicit matched.
- stream := Stream{
- Name: inst.Name,
- Description: inst.Description,
- Unit: inst.Unit,
- }
- in, _, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
- if e != nil {
- if err == nil {
- err = errCreatingAggregators
- }
- err = errors.Join(err, e)
- }
- if in != nil {
- // Ensured to have not seen given matched was false.
- measures = append(measures, in)
- }
- return measures, err
-}
-
-// addCallback registers a single instrument callback to be run when
-// `produce()` is called.
-func (i *inserter[N]) addCallback(cback func(context.Context) error) {
- i.pipeline.Lock()
- defer i.pipeline.Unlock()
- i.pipeline.callbacks = append(i.pipeline.callbacks, cback)
-}
-
-var aggIDCount uint64
-
-// aggVal is the cached value in an aggregators cache.
-type aggVal[N int64 | float64] struct {
- ID uint64
- Measure aggregate.Measure[N]
- Err error
-}
-
-// readerDefaultAggregation returns the default aggregation for the instrument
-// kind based on the reader's aggregation preferences. This is used unless the
-// aggregation is overridden with a view.
-func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation {
- aggregation := i.pipeline.reader.aggregation(kind)
- switch aggregation.(type) {
- case nil, AggregationDefault:
- // If the reader returns default or nil use the default selector.
- aggregation = DefaultAggregationSelector(kind)
- default:
- // Deep copy and validate before using.
- aggregation = aggregation.copy()
- if err := aggregation.err(); err != nil {
- orig := aggregation
- aggregation = DefaultAggregationSelector(kind)
- global.Error(
- err, "using default aggregation instead",
- "aggregation", orig,
- "replacement", aggregation,
- )
- }
- }
- return aggregation
-}
-
-// cachedAggregator returns the appropriate aggregate input and output
-// functions for an instrument configuration. If the exact instrument has been
-// created within the inst.Scope, those aggregate function instances will be
-// returned. Otherwise, new computed aggregate functions will be cached and
-// returned.
-//
-// If the instrument configuration conflicts with an instrument that has
-// already been created (e.g. description, unit, data type) a warning will be
-// logged at the "Info" level with the global OTel logger. Valid new aggregate
-// functions for the instrument configuration will still be returned without an
-// error.
-//
-// If the instrument defines an unknown or incompatible aggregation, an error
-// is returned.
-func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream, readerAggregation Aggregation) (meas aggregate.Measure[N], aggID uint64, err error) {
- switch stream.Aggregation.(type) {
- case nil:
- // The aggregation was not overridden with a view. Use the aggregation
- // provided by the reader.
- stream.Aggregation = readerAggregation
- case AggregationDefault:
- // The view explicitly requested the default aggregation.
- stream.Aggregation = DefaultAggregationSelector(kind)
- }
- if stream.ExemplarReservoirProviderSelector == nil {
- stream.ExemplarReservoirProviderSelector = DefaultExemplarReservoirProviderSelector
- }
-
- if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil {
- return nil, 0, fmt.Errorf(
- "creating aggregator with instrumentKind: %d, aggregation %v: %w",
- kind, stream.Aggregation, err,
- )
- }
-
- id := i.instID(kind, stream)
- // If there is a conflict, the specification says the view should
- // still be applied and a warning should be logged.
- i.logConflict(id)
-
- // If there are requests for the same instrument with different name
- // casing, the first-seen needs to be returned. Use a normalize ID for the
- // cache lookup to ensure the correct comparison.
- normID := id.normalize()
- cv := i.aggregators.Lookup(normID, func() aggVal[N] {
- b := aggregate.Builder[N]{
- Temporality: i.pipeline.reader.temporality(kind),
- ReservoirFunc: reservoirFunc[N](stream.ExemplarReservoirProviderSelector(stream.Aggregation), i.pipeline.exemplarFilter),
- }
- b.Filter = stream.AttributeFilter
- // A value less than or equal to zero will disable the aggregation
- // limits for the builder (an all the created aggregates).
- // CardinalityLimit.Lookup returns 0 by default if unset (or
- // unrecognized input). Use that value directly.
- b.AggregationLimit, _ = x.CardinalityLimit.Lookup()
-
- in, out, err := i.aggregateFunc(b, stream.Aggregation, kind)
- if err != nil {
- return aggVal[N]{0, nil, err}
- }
- if in == nil { // Drop aggregator.
- return aggVal[N]{0, nil, nil}
- }
- i.pipeline.addSync(scope, instrumentSync{
- // Use the first-seen name casing for this and all subsequent
- // requests of this instrument.
- name: stream.Name,
- description: stream.Description,
- unit: stream.Unit,
- compAgg: out,
- })
- id := atomic.AddUint64(&aggIDCount, 1)
- return aggVal[N]{id, in, err}
- })
- return cv.Measure, cv.ID, cv.Err
-}
-
-// logConflict validates if an instrument with the same case-insensitive name
-// as id has already been created. If that instrument conflicts with id, a
-// warning is logged.
-func (i *inserter[N]) logConflict(id instID) {
- // The API specification defines names as case-insensitive. If there is a
- // different casing of a name it needs to be a conflict.
- name := id.normalize().Name
- existing := i.views.Lookup(name, func() instID { return id })
- if id == existing {
- return
- }
-
- const msg = "duplicate metric stream definitions"
- args := []interface{}{
- "names", fmt.Sprintf("%q, %q", existing.Name, id.Name),
- "descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description),
- "kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind),
- "units", fmt.Sprintf("%s, %s", existing.Unit, id.Unit),
- "numbers", fmt.Sprintf("%s, %s", existing.Number, id.Number),
- }
-
- // The specification recommends logging a suggested view to resolve
- // conflicts if possible.
- //
- // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#duplicate-instrument-registration
- if id.Unit != existing.Unit || id.Number != existing.Number {
- // There is no view resolution for these, don't make a suggestion.
- global.Warn(msg, args...)
- return
- }
-
- var stream string
- if id.Name != existing.Name || id.Kind != existing.Kind {
- stream = `Stream{Name: "{{NEW_NAME}}"}`
- } else if id.Description != existing.Description {
- stream = fmt.Sprintf("Stream{Description: %q}", existing.Description)
- }
-
- inst := fmt.Sprintf(
- "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
- id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
- )
- args = append(args, "suggested.view", fmt.Sprintf("NewView(%s, %s)", inst, stream))
-
- global.Warn(msg, args...)
-}
-
-func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID {
- var zero N
- return instID{
- Name: stream.Name,
- Description: stream.Description,
- Unit: stream.Unit,
- Kind: kind,
- Number: fmt.Sprintf("%T", zero),
- }
-}
-
-// aggregateFunc returns new aggregate functions matching agg, kind, and
-// monotonic. If the agg is unknown or temporality is invalid, an error is
-// returned.
-func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kind InstrumentKind) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) {
- switch a := agg.(type) {
- case AggregationDefault:
- return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind)
- case AggregationDrop:
- // Return nil in and out to signify the drop aggregator.
- case AggregationLastValue:
- switch kind {
- case InstrumentKindGauge:
- meas, comp = b.LastValue()
- case InstrumentKindObservableGauge:
- meas, comp = b.PrecomputedLastValue()
- }
- case AggregationSum:
- switch kind {
- case InstrumentKindObservableCounter:
- meas, comp = b.PrecomputedSum(true)
- case InstrumentKindObservableUpDownCounter:
- meas, comp = b.PrecomputedSum(false)
- case InstrumentKindCounter, InstrumentKindHistogram:
- meas, comp = b.Sum(true)
- default:
- // InstrumentKindUpDownCounter, InstrumentKindObservableGauge, and
- // instrumentKindUndefined or other invalid instrument kinds.
- meas, comp = b.Sum(false)
- }
- case AggregationExplicitBucketHistogram:
- var noSum bool
- switch kind {
- case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge:
- // The sum should not be collected for any instrument that can make
- // negative measurements:
- // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
- noSum = true
- }
- meas, comp = b.ExplicitBucketHistogram(a.Boundaries, a.NoMinMax, noSum)
- case AggregationBase2ExponentialHistogram:
- var noSum bool
- switch kind {
- case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge:
- // The sum should not be collected for any instrument that can make
- // negative measurements:
- // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
- noSum = true
- }
- meas, comp = b.ExponentialBucketHistogram(a.MaxSize, a.MaxScale, a.NoMinMax, noSum)
-
- default:
- err = errUnknownAggregation
- }
-
- return meas, comp, err
-}
-
-// isAggregatorCompatible checks if the aggregation can be used by the instrument.
-// Current compatibility:
-//
-// | Instrument Kind | Drop | LastValue | Sum | Histogram | Exponential Histogram |
-// |--------------------------|------|-----------|-----|-----------|-----------------------|
-// | Counter | ✓ | | ✓ | ✓ | ✓ |
-// | UpDownCounter | ✓ | | ✓ | ✓ | ✓ |
-// | Histogram | ✓ | | ✓ | ✓ | ✓ |
-// | Gauge | ✓ | ✓ | | ✓ | ✓ |
-// | Observable Counter | ✓ | | ✓ | ✓ | ✓ |
-// | Observable UpDownCounter | ✓ | | ✓ | ✓ | ✓ |
-// | Observable Gauge | ✓ | ✓ | | ✓ | ✓ |.
-func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error {
- switch agg.(type) {
- case AggregationDefault:
- return nil
- case AggregationExplicitBucketHistogram, AggregationBase2ExponentialHistogram:
- switch kind {
- case InstrumentKindCounter,
- InstrumentKindUpDownCounter,
- InstrumentKindHistogram,
- InstrumentKindGauge,
- InstrumentKindObservableCounter,
- InstrumentKindObservableUpDownCounter,
- InstrumentKindObservableGauge:
- return nil
- default:
- return errIncompatibleAggregation
- }
- case AggregationSum:
- switch kind {
- case InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindUpDownCounter:
- return nil
- default:
- // TODO: review need for aggregation check after
- // https://github.com/open-telemetry/opentelemetry-specification/issues/2710
- return errIncompatibleAggregation
- }
- case AggregationLastValue:
- switch kind {
- case InstrumentKindObservableGauge, InstrumentKindGauge:
- return nil
- }
- // TODO: review need for aggregation check after
- // https://github.com/open-telemetry/opentelemetry-specification/issues/2710
- return errIncompatibleAggregation
- case AggregationDrop:
- return nil
- default:
- // This is used passed checking for default, it should be an error at this point.
- return fmt.Errorf("%w: %v", errUnknownAggregation, agg)
- }
-}
-
-// pipelines is the group of pipelines connecting Readers with instrument
-// measurement.
-type pipelines []*pipeline
-
-func newPipelines(res *resource.Resource, readers []Reader, views []View, exemplarFilter exemplar.Filter) pipelines {
- pipes := make([]*pipeline, 0, len(readers))
- for _, r := range readers {
- p := newPipeline(res, r, views, exemplarFilter)
- r.register(p)
- pipes = append(pipes, p)
- }
- return pipes
-}
-
-type unregisterFuncs struct {
- embedded.Registration
- f []func()
-}
-
-func (u unregisterFuncs) Unregister() error {
- for _, f := range u.f {
- f()
- }
- return nil
-}
-
-// resolver facilitates resolving aggregate functions an instrument calls to
-// aggregate measurements with while updating all pipelines that need to pull
-// from those aggregations.
-type resolver[N int64 | float64] struct {
- inserters []*inserter[N]
-}
-
-func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) resolver[N] {
- in := make([]*inserter[N], len(p))
- for i := range in {
- in[i] = newInserter[N](p[i], vc)
- }
- return resolver[N]{in}
-}
-
-// Aggregators returns the Aggregators that must be updated by the instrument
-// defined by key.
-func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) {
- var measures []aggregate.Measure[N]
-
- var err error
- for _, i := range r.inserters {
- in, e := i.Instrument(id, i.readerDefaultAggregation(id.Kind))
- if e != nil {
- err = errors.Join(err, e)
- }
- measures = append(measures, in...)
- }
- return measures, err
-}
-
-// HistogramAggregators returns the histogram Aggregators that must be updated by the instrument
-// defined by key. If boundaries were provided on instrument instantiation, those take precedence
-// over boundaries provided by the reader.
-func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) {
- var measures []aggregate.Measure[N]
-
- var err error
- for _, i := range r.inserters {
- agg := i.readerDefaultAggregation(id.Kind)
- if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 {
- histAgg.Boundaries = boundaries
- agg = histAgg
- }
- in, e := i.Instrument(id, agg)
- if e != nil {
- err = errors.Join(err, e)
- }
- measures = append(measures, in...)
- }
- return measures, err
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
deleted file mode 100644
index 2fca89e5a..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "context"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/embedded"
- "go.opentelemetry.io/otel/metric/noop"
- "go.opentelemetry.io/otel/sdk/instrumentation"
-)
-
-// MeterProvider handles the creation and coordination of Meters. All Meters
-// created by a MeterProvider will be associated with the same Resource, have
-// the same Views applied to them, and have their produced metric telemetry
-// passed to the configured Readers.
-type MeterProvider struct {
- embedded.MeterProvider
-
- pipes pipelines
- meters cache[instrumentation.Scope, *meter]
-
- forceFlush, shutdown func(context.Context) error
- stopped atomic.Bool
-}
-
-// Compile-time check MeterProvider implements metric.MeterProvider.
-var _ metric.MeterProvider = (*MeterProvider)(nil)
-
-// NewMeterProvider returns a new and configured MeterProvider.
-//
-// By default, the returned MeterProvider is configured with the default
-// Resource and no Readers. Readers cannot be added after a MeterProvider is
-// created. This means the returned MeterProvider, one created with no
-// Readers, will perform no operations.
-func NewMeterProvider(options ...Option) *MeterProvider {
- conf := newConfig(options)
- flush, sdown := conf.readerSignals()
-
- mp := &MeterProvider{
- pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter),
- forceFlush: flush,
- shutdown: sdown,
- }
- // Log after creation so all readers show correctly they are registered.
- global.Info("MeterProvider created",
- "Resource", conf.res,
- "Readers", conf.readers,
- "Views", len(conf.views),
- )
- return mp
-}
-
-// Meter returns a Meter with the given name and configured with options.
-//
-// The name should be the name of the instrumentation scope creating
-// telemetry. This name may be the same as the instrumented code only if that
-// code provides built-in instrumentation.
-//
-// Calls to the Meter method after Shutdown has been called will return Meters
-// that perform no operations.
-//
-// This method is safe to call concurrently.
-func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter {
- if name == "" {
- global.Warn("Invalid Meter name.", "name", name)
- }
-
- if mp.stopped.Load() {
- return noop.Meter{}
- }
-
- c := metric.NewMeterConfig(options...)
- s := instrumentation.Scope{
- Name: name,
- Version: c.InstrumentationVersion(),
- SchemaURL: c.SchemaURL(),
- Attributes: c.InstrumentationAttributes(),
- }
-
- global.Info("Meter created",
- "Name", s.Name,
- "Version", s.Version,
- "SchemaURL", s.SchemaURL,
- "Attributes", s.Attributes,
- )
-
- return mp.meters.Lookup(s, func() *meter {
- return newMeter(s, mp.pipes)
- })
-}
-
-// ForceFlush flushes all pending telemetry.
-//
-// This method honors the deadline or cancellation of ctx. An appropriate
-// error will be returned in these situations. There is no guaranteed that all
-// telemetry be flushed or all resources have been released in these
-// situations.
-//
-// ForceFlush calls ForceFlush(context.Context) error
-// on all Readers that implements this method.
-//
-// This method is safe to call concurrently.
-func (mp *MeterProvider) ForceFlush(ctx context.Context) error {
- if mp.forceFlush != nil {
- return mp.forceFlush(ctx)
- }
- return nil
-}
-
-// Shutdown shuts down the MeterProvider flushing all pending telemetry and
-// releasing any held computational resources.
-//
-// This call is idempotent. The first call will perform all flush and
-// releasing operations. Subsequent calls will perform no action and will
-// return an error stating this.
-//
-// Measurements made by instruments from meters this MeterProvider created
-// will not be exported after Shutdown is called.
-//
-// This method honors the deadline or cancellation of ctx. An appropriate
-// error will be returned in these situations. There is no guaranteed that all
-// telemetry be flushed or all resources have been released in these
-// situations.
-//
-// This method is safe to call concurrently.
-func (mp *MeterProvider) Shutdown(ctx context.Context) error {
- // Even though it may seem like there is a synchronization issue between the
- // call to `Store` and checking `shutdown`, the Go concurrency model ensures
- // that is not the case, as all the atomic operations executed in a program
- // behave as though executed in some sequentially consistent order. This
- // definition provides the same semantics as C++'s sequentially consistent
- // atomics and Java's volatile variables.
- // See https://go.dev/ref/mem#atomic and https://pkg.go.dev/sync/atomic.
-
- mp.stopped.Store(true)
- if mp.shutdown != nil {
- return mp.shutdown(ctx)
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
deleted file mode 100644
index d13a70697..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "context"
- "errors"
-
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-// errDuplicateRegister is logged by a Reader when an attempt to registered it
-// more than once occurs.
-var errDuplicateRegister = errors.New("duplicate reader registration")
-
-// ErrReaderNotRegistered is returned if Collect or Shutdown are called before
-// the reader is registered with a MeterProvider.
-var ErrReaderNotRegistered = errors.New("reader is not registered")
-
-// ErrReaderShutdown is returned if Collect or Shutdown are called after a
-// reader has been Shutdown once.
-var ErrReaderShutdown = errors.New("reader is shutdown")
-
-// errNonPositiveDuration is logged when an environmental variable
-// has non-positive value.
-var errNonPositiveDuration = errors.New("non-positive duration")
-
-// Reader is the interface used between the SDK and an
-// exporter. Control flow is bi-directional through the
-// Reader, since the SDK initiates ForceFlush and Shutdown
-// while the exporter initiates collection. The Register() method here
-// informs the Reader that it can begin reading, signaling the
-// start of bi-directional control flow.
-//
-// Typically, push-based exporters that are periodic will
-// implement PeriodicExporter themselves and construct a
-// PeriodicReader to satisfy this interface.
-//
-// Pull-based exporters will typically implement Register
-// themselves, since they read on demand.
-//
-// Warning: methods may be added to this interface in minor releases.
-type Reader interface {
- // register registers a Reader with a MeterProvider.
- // The producer argument allows the Reader to signal the sdk to collect
- // and send aggregated metric measurements.
- register(sdkProducer)
-
- // temporality reports the Temporality for the instrument kind provided.
- //
- // This method needs to be concurrent safe with itself and all the other
- // Reader methods.
- temporality(InstrumentKind) metricdata.Temporality
-
- // aggregation returns what Aggregation to use for an instrument kind.
- //
- // This method needs to be concurrent safe with itself and all the other
- // Reader methods.
- aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type.
-
- // Collect gathers and returns all metric data related to the Reader from
- // the SDK and stores it in rm. An error is returned if this is called
- // after Shutdown or if rm is nil.
- //
- // This method needs to be concurrent safe, and the cancellation of the
- // passed context is expected to be honored.
- Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Shutdown flushes all metric measurements held in an export pipeline and releases any
- // held computational resources.
- //
- // This deadline or cancellation of the passed context are honored. An appropriate
- // error will be returned in these situations. There is no guaranteed that all
- // telemetry be flushed or all resources have been released in these
- // situations.
- //
- // After Shutdown is called, calls to Collect will perform no operation and instead will return
- // an error indicating the shutdown state.
- //
- // This method needs to be concurrent safe.
- Shutdown(context.Context) error
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
-
-// sdkProducer produces metrics for a Reader.
-type sdkProducer interface {
- // produce returns aggregated metrics from a single collection.
- //
- // This method is safe to call concurrently.
- produce(context.Context, *metricdata.ResourceMetrics) error
-}
-
-// Producer produces metrics for a Reader from an external source.
-type Producer interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Produce returns aggregated metrics from an external source.
- //
- // This method should be safe to call concurrently.
- Produce(context.Context) ([]metricdata.ScopeMetrics, error)
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
-
-// produceHolder is used as an atomic.Value to wrap the non-concrete producer
-// type.
-type produceHolder struct {
- produce func(context.Context, *metricdata.ResourceMetrics) error
-}
-
-// shutdownProducer produces an ErrReaderShutdown error always.
-type shutdownProducer struct{}
-
-// produce returns an ErrReaderShutdown error.
-func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error {
- return ErrReaderShutdown
-}
-
-// TemporalitySelector selects the temporality to use based on the InstrumentKind.
-type TemporalitySelector func(InstrumentKind) metricdata.Temporality
-
-// DefaultTemporalitySelector is the default TemporalitySelector used if
-// WithTemporalitySelector is not provided. CumulativeTemporality will be used
-// for all instrument kinds if this TemporalitySelector is used.
-func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality {
- return metricdata.CumulativeTemporality
-}
-
-// AggregationSelector selects the aggregation and the parameters to use for
-// that aggregation based on the InstrumentKind.
-//
-// If the Aggregation returned is nil or DefaultAggregation, the selection from
-// DefaultAggregationSelector will be used.
-type AggregationSelector func(InstrumentKind) Aggregation
-
-// DefaultAggregationSelector returns the default aggregation and parameters
-// that will be used to summarize measurement made from an instrument of
-// InstrumentKind. This AggregationSelector using the following selection
-// mapping: Counter ⇨ Sum, Observable Counter ⇨ Sum, UpDownCounter ⇨ Sum,
-// Observable UpDownCounter ⇨ Sum, Observable Gauge ⇨ LastValue,
-// Histogram ⇨ ExplicitBucketHistogram.
-func DefaultAggregationSelector(ik InstrumentKind) Aggregation {
- switch ik {
- case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter:
- return AggregationSum{}
- case InstrumentKindObservableGauge, InstrumentKindGauge:
- return AggregationLastValue{}
- case InstrumentKindHistogram:
- return AggregationExplicitBucketHistogram{
- Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
- NoMinMax: false,
- }
- }
- panic("unknown instrument kind")
-}
-
-// ReaderOption is an option which can be applied to manual or Periodic
-// readers.
-type ReaderOption interface {
- PeriodicReaderOption
- ManualReaderOption
-}
-
-// WithProducer registers producers as an external Producer of metric data
-// for this Reader.
-func WithProducer(p Producer) ReaderOption {
- return producerOption{p: p}
-}
-
-type producerOption struct {
- p Producer
-}
-
-// applyManual returns a manualReaderConfig with option applied.
-func (o producerOption) applyManual(c manualReaderConfig) manualReaderConfig {
- c.producers = append(c.producers, o.p)
- return c
-}
-
-// applyPeriodic returns a periodicReaderConfig with option applied.
-func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig {
- c.producers = append(c.producers, o.p)
- return c
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
deleted file mode 100644
index 7c4b8530d..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-// version is the current release version of the metric SDK in use.
-func version() string {
- return "1.34.0"
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
deleted file mode 100644
index 630890f42..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package metric // import "go.opentelemetry.io/otel/sdk/metric"
-
-import (
- "errors"
- "regexp"
- "strings"
-
- "go.opentelemetry.io/otel/internal/global"
-)
-
-var (
- errMultiInst = errors.New("name replacement for multiple instruments")
- errEmptyView = errors.New("no criteria provided for view")
-
- emptyView = func(Instrument) (Stream, bool) { return Stream{}, false }
-)
-
-// View is an override to the default behavior of the SDK. It defines how data
-// should be collected for certain instruments. It returns true and the exact
-// Stream to use for matching Instruments. Otherwise, if the view does not
-// match, false is returned.
-type View func(Instrument) (Stream, bool)
-
-// NewView returns a View that applies the Stream mask for all instruments that
-// match criteria. The returned View will only apply mask if all non-zero-value
-// fields of criteria match the corresponding Instrument passed to the view. If
-// no criteria are provided, all field of criteria are their zero-values, a
-// view that matches no instruments is returned. If you need to match a
-// zero-value field, create a View directly.
-//
-// The Name field of criteria supports wildcard pattern matching. The "*"
-// wildcard is recognized as matching zero or more characters, and "?" is
-// recognized as matching exactly one character. For example, a pattern of "*"
-// matches all instrument names.
-//
-// The Stream mask only applies updates for non-zero-value fields. By default,
-// the Instrument the View matches against will be use for the Name,
-// Description, and Unit of the returned Stream and no Aggregation or
-// AttributeFilter are set. All non-zero-value fields of mask are used instead
-// of the default. If you need to zero out an Stream field returned from a
-// View, create a View directly.
-func NewView(criteria Instrument, mask Stream) View {
- if criteria.IsEmpty() {
- global.Error(
- errEmptyView, "dropping view",
- "mask", mask,
- )
- return emptyView
- }
-
- var matchFunc func(Instrument) bool
- if strings.ContainsAny(criteria.Name, "*?") {
- if mask.Name != "" {
- global.Error(
- errMultiInst, "dropping view",
- "criteria", criteria,
- "mask", mask,
- )
- return emptyView
- }
-
- // Handle branching here in NewView instead of criteria.matches so
- // criteria.matches remains inlinable for the simple case.
- pattern := regexp.QuoteMeta(criteria.Name)
- pattern = "^" + pattern + "$"
- pattern = strings.ReplaceAll(pattern, `\?`, ".")
- pattern = strings.ReplaceAll(pattern, `\*`, ".*")
- re := regexp.MustCompile(pattern)
- matchFunc = func(i Instrument) bool {
- return re.MatchString(i.Name) &&
- criteria.matchesDescription(i) &&
- criteria.matchesKind(i) &&
- criteria.matchesUnit(i) &&
- criteria.matchesScope(i)
- }
- } else {
- matchFunc = criteria.matches
- }
-
- var agg Aggregation
- if mask.Aggregation != nil {
- agg = mask.Aggregation.copy()
- if err := agg.err(); err != nil {
- global.Error(
- err, "not using aggregation with view",
- "criteria", criteria,
- "mask", mask,
- )
- agg = nil
- }
- }
-
- return func(i Instrument) (Stream, bool) {
- if matchFunc(i) {
- return Stream{
- Name: nonZero(mask.Name, i.Name),
- Description: nonZero(mask.Description, i.Description),
- Unit: nonZero(mask.Unit, i.Unit),
- Aggregation: agg,
- AttributeFilter: mask.AttributeFilter,
- ExemplarReservoirProviderSelector: mask.ExemplarReservoirProviderSelector,
- }, true
- }
- return Stream{}, false
- }
-}
-
-// nonZero returns v if it is non-zero-valued, otherwise alt.
-func nonZero[T comparable](v, alt T) T {
- var zero T
- if v != zero {
- return v
- }
- return alt
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md
deleted file mode 100644
index 4ad864d71..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# SDK Resource
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
deleted file mode 100644
index c02aeefdd..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "context"
- "errors"
- "fmt"
-)
-
-// ErrPartialResource is returned by a detector when complete source
-// information for a Resource is unavailable or the source information
-// contains invalid values that are omitted from the returned Resource.
-var ErrPartialResource = errors.New("partial resource")
-
-// Detector detects OpenTelemetry resource information.
-type Detector interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Detect returns an initialized Resource based on gathered information.
- // If the source information to construct a Resource contains invalid
- // values, a Resource is returned with the valid parts of the source
- // information used for initialization along with an appropriately
- // wrapped ErrPartialResource error.
- Detect(ctx context.Context) (*Resource, error)
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
-
-// Detect returns a new [Resource] merged from all the Resources each of the
-// detectors produces. Each of the detectors are called sequentially, in the
-// order they are passed, merging the produced resource into the previous.
-//
-// This may return a partial Resource along with an error containing
-// [ErrPartialResource] if that error is returned from a detector. It may also
-// return a merge-conflicting Resource along with an error containing
-// [ErrSchemaURLConflict] if merging Resources from different detectors results
-// in a schema URL conflict. It is up to the caller to determine if this
-// returned Resource should be used or not.
-//
-// If one of the detectors returns an error that is not [ErrPartialResource],
-// the resource produced by the detector will not be merged and the returned
-// error will wrap that detector's error.
-func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
- r := new(Resource)
- return r, detect(ctx, r, detectors)
-}
-
-// detect runs all detectors using ctx and merges the result into res. This
-// assumes res is allocated and not nil, it will panic otherwise.
-//
-// If the detectors or merging resources produces any errors (i.e.
-// [ErrPartialResource] [ErrSchemaURLConflict]), a single error wrapping all of
-// these errors will be returned. Otherwise, nil is returned.
-func detect(ctx context.Context, res *Resource, detectors []Detector) error {
- var (
- r *Resource
- err error
- e error
- )
-
- for _, detector := range detectors {
- if detector == nil {
- continue
- }
- r, e = detector.Detect(ctx)
- if e != nil {
- err = errors.Join(err, e)
- if !errors.Is(e, ErrPartialResource) {
- continue
- }
- }
- r, e = Merge(res, r)
- if e != nil {
- err = errors.Join(err, e)
- }
- *res = *r
- }
-
- if err != nil {
- if errors.Is(err, ErrSchemaURLConflict) {
- // If there has been a merge conflict, ensure the resource has no
- // schema URL.
- res.schemaURL = ""
- }
-
- err = fmt.Errorf("error detecting resource: %w", err)
- }
- return err
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
deleted file mode 100644
index cf3c88e15..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "context"
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/google/uuid"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
-)
-
-type (
- // telemetrySDK is a Detector that provides information about
- // the OpenTelemetry SDK used. This Detector is included as a
- // builtin. If these resource attributes are not wanted, use
- // resource.New() to explicitly disable them.
- telemetrySDK struct{}
-
- // host is a Detector that provides information about the host
- // being run on. This Detector is included as a builtin. If
- // these resource attributes are not wanted, use the
- // resource.New() to explicitly disable them.
- host struct{}
-
- stringDetector struct {
- schemaURL string
- K attribute.Key
- F func() (string, error)
- }
-
- defaultServiceNameDetector struct{}
-
- defaultServiceInstanceIDDetector struct{}
-)
-
-var (
- _ Detector = telemetrySDK{}
- _ Detector = host{}
- _ Detector = stringDetector{}
- _ Detector = defaultServiceNameDetector{}
- _ Detector = defaultServiceInstanceIDDetector{}
-)
-
-// Detect returns a *Resource that describes the OpenTelemetry SDK used.
-func (telemetrySDK) Detect(context.Context) (*Resource, error) {
- return NewWithAttributes(
- semconv.SchemaURL,
- semconv.TelemetrySDKName("opentelemetry"),
- semconv.TelemetrySDKLanguageGo,
- semconv.TelemetrySDKVersion(sdk.Version()),
- ), nil
-}
-
-// Detect returns a *Resource that describes the host being run on.
-func (host) Detect(ctx context.Context) (*Resource, error) {
- return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx)
-}
-
-// StringDetector returns a Detector that will produce a *Resource
-// containing the string as a value corresponding to k. The resulting Resource
-// will have the specified schemaURL.
-func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector {
- return stringDetector{schemaURL: schemaURL, K: k, F: f}
-}
-
-// Detect returns a *Resource that describes the string as a value
-// corresponding to attribute.Key as well as the specific schemaURL.
-func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) {
- value, err := sd.F()
- if err != nil {
- return nil, fmt.Errorf("%s: %w", string(sd.K), err)
- }
- a := sd.K.String(value)
- if !a.Valid() {
- return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit())
- }
- return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil
-}
-
-// Detect implements Detector.
-func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) {
- return StringDetector(
- semconv.SchemaURL,
- semconv.ServiceNameKey,
- func() (string, error) {
- executable, err := os.Executable()
- if err != nil {
- return "unknown_service:go", nil
- }
- return "unknown_service:" + filepath.Base(executable), nil
- },
- ).Detect(ctx)
-}
-
-// Detect implements Detector.
-func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) {
- return StringDetector(
- semconv.SchemaURL,
- semconv.ServiceInstanceIDKey,
- func() (string, error) {
- version4Uuid, err := uuid.NewRandom()
- if err != nil {
- return "", err
- }
-
- return version4Uuid.String(), nil
- },
- ).Detect(ctx)
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
deleted file mode 100644
index 0d6e213d9..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// config contains configuration for Resource creation.
-type config struct {
- // detectors that will be evaluated.
- detectors []Detector
- // SchemaURL to associate with the Resource.
- schemaURL string
-}
-
-// Option is the interface that applies a configuration option.
-type Option interface {
- // apply sets the Option value of a config.
- apply(config) config
-}
-
-// WithAttributes adds attributes to the configured Resource.
-func WithAttributes(attributes ...attribute.KeyValue) Option {
- return WithDetectors(detectAttributes{attributes})
-}
-
-type detectAttributes struct {
- attributes []attribute.KeyValue
-}
-
-func (d detectAttributes) Detect(context.Context) (*Resource, error) {
- return NewSchemaless(d.attributes...), nil
-}
-
-// WithDetectors adds detectors to be evaluated for the configured resource.
-func WithDetectors(detectors ...Detector) Option {
- return detectorsOption{detectors: detectors}
-}
-
-type detectorsOption struct {
- detectors []Detector
-}
-
-func (o detectorsOption) apply(cfg config) config {
- cfg.detectors = append(cfg.detectors, o.detectors...)
- return cfg
-}
-
-// WithFromEnv adds attributes from environment variables to the configured resource.
-func WithFromEnv() Option {
- return WithDetectors(fromEnv{})
-}
-
-// WithHost adds attributes from the host to the configured resource.
-func WithHost() Option {
- return WithDetectors(host{})
-}
-
-// WithHostID adds host ID information to the configured resource.
-func WithHostID() Option {
- return WithDetectors(hostIDDetector{})
-}
-
-// WithTelemetrySDK adds TelemetrySDK version info to the configured resource.
-func WithTelemetrySDK() Option {
- return WithDetectors(telemetrySDK{})
-}
-
-// WithSchemaURL sets the schema URL for the configured resource.
-func WithSchemaURL(schemaURL string) Option {
- return schemaURLOption(schemaURL)
-}
-
-type schemaURLOption string
-
-func (o schemaURLOption) apply(cfg config) config {
- cfg.schemaURL = string(o)
- return cfg
-}
-
-// WithOS adds all the OS attributes to the configured Resource.
-// See individual WithOS* functions to configure specific attributes.
-func WithOS() Option {
- return WithDetectors(
- osTypeDetector{},
- osDescriptionDetector{},
- )
-}
-
-// WithOSType adds an attribute with the operating system type to the configured Resource.
-func WithOSType() Option {
- return WithDetectors(osTypeDetector{})
-}
-
-// WithOSDescription adds an attribute with the operating system description to the
-// configured Resource. The formatted string is equivalent to the output of the
-// `uname -snrvm` command.
-func WithOSDescription() Option {
- return WithDetectors(osDescriptionDetector{})
-}
-
-// WithProcess adds all the Process attributes to the configured Resource.
-//
-// Warning! This option will include process command line arguments. If these
-// contain sensitive information it will be included in the exported resource.
-//
-// This option is equivalent to calling WithProcessPID,
-// WithProcessExecutableName, WithProcessExecutablePath,
-// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName,
-// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each
-// option function for information about what resource attributes each
-// includes.
-func WithProcess() Option {
- return WithDetectors(
- processPIDDetector{},
- processExecutableNameDetector{},
- processExecutablePathDetector{},
- processCommandArgsDetector{},
- processOwnerDetector{},
- processRuntimeNameDetector{},
- processRuntimeVersionDetector{},
- processRuntimeDescriptionDetector{},
- )
-}
-
-// WithProcessPID adds an attribute with the process identifier (PID) to the
-// configured Resource.
-func WithProcessPID() Option {
- return WithDetectors(processPIDDetector{})
-}
-
-// WithProcessExecutableName adds an attribute with the name of the process
-// executable to the configured Resource.
-func WithProcessExecutableName() Option {
- return WithDetectors(processExecutableNameDetector{})
-}
-
-// WithProcessExecutablePath adds an attribute with the full path to the process
-// executable to the configured Resource.
-func WithProcessExecutablePath() Option {
- return WithDetectors(processExecutablePathDetector{})
-}
-
-// WithProcessCommandArgs adds an attribute with all the command arguments (including
-// the command/executable itself) as received by the process to the configured
-// Resource.
-//
-// Warning! This option will include process command line arguments. If these
-// contain sensitive information it will be included in the exported resource.
-func WithProcessCommandArgs() Option {
- return WithDetectors(processCommandArgsDetector{})
-}
-
-// WithProcessOwner adds an attribute with the username of the user that owns the process
-// to the configured Resource.
-func WithProcessOwner() Option {
- return WithDetectors(processOwnerDetector{})
-}
-
-// WithProcessRuntimeName adds an attribute with the name of the runtime of this
-// process to the configured Resource.
-func WithProcessRuntimeName() Option {
- return WithDetectors(processRuntimeNameDetector{})
-}
-
-// WithProcessRuntimeVersion adds an attribute with the version of the runtime of
-// this process to the configured Resource.
-func WithProcessRuntimeVersion() Option {
- return WithDetectors(processRuntimeVersionDetector{})
-}
-
-// WithProcessRuntimeDescription adds an attribute with an additional description
-// about the runtime of the process to the configured Resource.
-func WithProcessRuntimeDescription() Option {
- return WithDetectors(processRuntimeDescriptionDetector{})
-}
-
-// WithContainer adds all the Container attributes to the configured Resource.
-// See individual WithContainer* functions to configure specific attributes.
-func WithContainer() Option {
- return WithDetectors(
- cgroupContainerIDDetector{},
- )
-}
-
-// WithContainerID adds an attribute with the id of the container to the configured Resource.
-// Note: WithContainerID will not extract the correct container ID in an ECS environment.
-// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs).
-func WithContainerID() Option {
- return WithDetectors(cgroupContainerIDDetector{})
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
deleted file mode 100644
index 5ecd859a5..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "bufio"
- "context"
- "errors"
- "io"
- "os"
- "regexp"
-
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
-)
-
-type containerIDProvider func() (string, error)
-
-var (
- containerID containerIDProvider = getContainerIDFromCGroup
- cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)
-)
-
-type cgroupContainerIDDetector struct{}
-
-const cgroupPath = "/proc/self/cgroup"
-
-// Detect returns a *Resource that describes the id of the container.
-// If no container id found, an empty resource will be returned.
-func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
- containerID, err := containerID()
- if err != nil {
- return nil, err
- }
-
- if containerID == "" {
- return Empty(), nil
- }
- return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil
-}
-
-var (
- defaultOSStat = os.Stat
- osStat = defaultOSStat
-
- defaultOSOpen = func(name string) (io.ReadCloser, error) {
- return os.Open(name)
- }
- osOpen = defaultOSOpen
-)
-
-// getContainerIDFromCGroup returns the id of the container from the cgroup file.
-// If no container id found, an empty string will be returned.
-func getContainerIDFromCGroup() (string, error) {
- if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) {
- // File does not exist, skip
- return "", nil
- }
-
- file, err := osOpen(cgroupPath)
- if err != nil {
- return "", err
- }
- defer file.Close()
-
- return getContainerIDFromReader(file), nil
-}
-
-// getContainerIDFromReader returns the id of the container from reader.
-func getContainerIDFromReader(reader io.Reader) string {
- scanner := bufio.NewScanner(reader)
- for scanner.Scan() {
- line := scanner.Text()
-
- if id := getContainerIDFromLine(line); id != "" {
- return id
- }
- }
- return ""
-}
-
-// getContainerIDFromLine returns the id of the container from one string line.
-func getContainerIDFromLine(line string) string {
- matches := cgroupContainerIDRe.FindStringSubmatch(line)
- if len(matches) <= 1 {
- return ""
- }
- return matches[1]
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
deleted file mode 100644
index 64939a271..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package resource provides detecting and representing resources.
-//
-// The fundamental struct is a Resource which holds identifying information
-// about the entities for which telemetry is exported.
-//
-// To automatically construct Resources from an environment a Detector
-// interface is defined. Implementations of this interface can be passed to
-// the Detect function to generate a Resource from the merged information.
-//
-// To load a user defined Resource from the environment variable
-// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret
-// the value as a list of comma delimited key/value pairs
-// (e.g. `<key1>=<value1>,<key2>=<value2>,...`).
-//
-// While this package provides a stable API,
-// the attributes added by resource detectors may change.
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
deleted file mode 100644
index 813f05624..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "context"
- "fmt"
- "net/url"
- "os"
- "strings"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
-)
-
-const (
- // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
- resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials
-
- // svcNameKey is the environment variable name that Service Name information will be read from.
- svcNameKey = "OTEL_SERVICE_NAME"
-)
-
-// errMissingValue is returned when a resource value is missing.
-var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
-
-// fromEnv is a Detector that implements the Detector and collects
-// resources from environment. This Detector is included as a
-// builtin.
-type fromEnv struct{}
-
-// compile time assertion that FromEnv implements Detector interface.
-var _ Detector = fromEnv{}
-
-// Detect collects resources from environment.
-func (fromEnv) Detect(context.Context) (*Resource, error) {
- attrs := strings.TrimSpace(os.Getenv(resourceAttrKey))
- svcName := strings.TrimSpace(os.Getenv(svcNameKey))
-
- if attrs == "" && svcName == "" {
- return Empty(), nil
- }
-
- var res *Resource
-
- if svcName != "" {
- res = NewSchemaless(semconv.ServiceName(svcName))
- }
-
- r2, err := constructOTResources(attrs)
-
- // Ensure that the resource with the service name from OTEL_SERVICE_NAME
- // takes precedence, if it was defined.
- res, err2 := Merge(r2, res)
-
- if err == nil {
- err = err2
- } else if err2 != nil {
- err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()})
- }
-
- return res, err
-}
-
-func constructOTResources(s string) (*Resource, error) {
- if s == "" {
- return Empty(), nil
- }
- pairs := strings.Split(s, ",")
- var attrs []attribute.KeyValue
- var invalid []string
- for _, p := range pairs {
- k, v, found := strings.Cut(p, "=")
- if !found {
- invalid = append(invalid, p)
- continue
- }
- key := strings.TrimSpace(k)
- val, err := url.PathUnescape(strings.TrimSpace(v))
- if err != nil {
- // Retain original value if decoding fails, otherwise it will be
- // an empty string.
- val = v
- otel.Handle(err)
- }
- attrs = append(attrs, attribute.String(key, val))
- }
- var err error
- if len(invalid) > 0 {
- err = fmt.Errorf("%w: %v", errMissingValue, invalid)
- }
- return NewSchemaless(attrs...), err
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
deleted file mode 100644
index 2d0f65498..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "context"
- "errors"
- "strings"
-
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
-)
-
-type hostIDProvider func() (string, error)
-
-var defaultHostIDProvider hostIDProvider = platformHostIDReader.read
-
-var hostID = defaultHostIDProvider
-
-type hostIDReader interface {
- read() (string, error)
-}
-
-type fileReader func(string) (string, error)
-
-type commandExecutor func(string, ...string) (string, error)
-
-// hostIDReaderBSD implements hostIDReader.
-type hostIDReaderBSD struct {
- execCommand commandExecutor
- readFile fileReader
-}
-
-// read attempts to read the machine-id from /etc/hostid. If not found it will
-// execute `kenv -q smbios.system.uuid`. If neither location yields an id an
-// error will be returned.
-func (r *hostIDReaderBSD) read() (string, error) {
- if result, err := r.readFile("/etc/hostid"); err == nil {
- return strings.TrimSpace(result), nil
- }
-
- if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil {
- return strings.TrimSpace(result), nil
- }
-
- return "", errors.New("host id not found in: /etc/hostid or kenv")
-}
-
-// hostIDReaderDarwin implements hostIDReader.
-type hostIDReaderDarwin struct {
- execCommand commandExecutor
-}
-
-// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id
-// from the IOPlatformUUID line. If the command fails or the uuid cannot be
-// parsed an error will be returned.
-func (r *hostIDReaderDarwin) read() (string, error) {
- result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice")
- if err != nil {
- return "", err
- }
-
- lines := strings.Split(result, "\n")
- for _, line := range lines {
- if strings.Contains(line, "IOPlatformUUID") {
- parts := strings.Split(line, " = ")
- if len(parts) == 2 {
- return strings.Trim(parts[1], "\""), nil
- }
- break
- }
- }
-
- return "", errors.New("could not parse IOPlatformUUID")
-}
-
-type hostIDReaderLinux struct {
- readFile fileReader
-}
-
-// read attempts to read the machine-id from /etc/machine-id followed by
-// /var/lib/dbus/machine-id. If neither location yields an ID an error will
-// be returned.
-func (r *hostIDReaderLinux) read() (string, error) {
- if result, err := r.readFile("/etc/machine-id"); err == nil {
- return strings.TrimSpace(result), nil
- }
-
- if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil {
- return strings.TrimSpace(result), nil
- }
-
- return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id")
-}
-
-type hostIDDetector struct{}
-
-// Detect returns a *Resource containing the platform specific host id.
-func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) {
- hostID, err := hostID()
- if err != nil {
- return nil, err
- }
-
- return NewWithAttributes(
- semconv.SchemaURL,
- semconv.HostID(hostID),
- ), nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
deleted file mode 100644
index cc8b8938e..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:build dragonfly || freebsd || netbsd || openbsd || solaris
-// +build dragonfly freebsd netbsd openbsd solaris
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-var platformHostIDReader hostIDReader = &hostIDReaderBSD{
- execCommand: execCommand,
- readFile: readFile,
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
deleted file mode 100644
index b09fde3b7..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-var platformHostIDReader hostIDReader = &hostIDReaderDarwin{
- execCommand: execCommand,
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
deleted file mode 100644
index d9e5d1a8f..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import "os/exec"
-
-func execCommand(name string, arg ...string) (string, error) {
- cmd := exec.Command(name, arg...)
- b, err := cmd.Output()
- if err != nil {
- return "", err
- }
-
- return string(b), nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
deleted file mode 100644
index f84f17324..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:build linux
-// +build linux
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-var platformHostIDReader hostIDReader = &hostIDReaderLinux{
- readFile: readFile,
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
deleted file mode 100644
index 6354b3560..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import "os"
-
-func readFile(filename string) (string, error) {
- b, err := os.ReadFile(filename)
- if err != nil {
- return "", err
- }
-
- return string(b), nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
deleted file mode 100644
index df12c44c5..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
-// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-// hostIDReaderUnsupported is a placeholder implementation for operating systems
-// for which this project currently doesn't support host.id
-// attribute detection. See build tags declaration early on this file
-// for a list of unsupported OSes.
-type hostIDReaderUnsupported struct{}
-
-func (*hostIDReaderUnsupported) read() (string, error) {
- return "<unknown>", nil
-}
-
-var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
deleted file mode 100644
index 3677c83d7..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:build windows
-// +build windows
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "golang.org/x/sys/windows/registry"
-)
-
-// implements hostIDReader.
-type hostIDReaderWindows struct{}
-
-// read reads MachineGuid from the Windows registry key:
-// SOFTWARE\Microsoft\Cryptography.
-func (*hostIDReaderWindows) read() (string, error) {
- k, err := registry.OpenKey(
- registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`,
- registry.QUERY_VALUE|registry.WOW64_64KEY,
- )
- if err != nil {
- return "", err
- }
- defer k.Close()
-
- guid, _, err := k.GetStringValue("MachineGuid")
- if err != nil {
- return "", err
- }
-
- return guid, nil
-}
-
-var platformHostIDReader hostIDReader = &hostIDReaderWindows{}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
deleted file mode 100644
index 8a48ab4fa..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "context"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
-)
-
-type osDescriptionProvider func() (string, error)
-
-var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription
-
-var osDescription = defaultOSDescriptionProvider
-
-func setDefaultOSDescriptionProvider() {
- setOSDescriptionProvider(defaultOSDescriptionProvider)
-}
-
-func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) {
- osDescription = osDescriptionProvider
-}
-
-type (
- osTypeDetector struct{}
- osDescriptionDetector struct{}
-)
-
-// Detect returns a *Resource that describes the operating system type the
-// service is running on.
-func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
- osType := runtimeOS()
-
- osTypeAttribute := mapRuntimeOSToSemconvOSType(osType)
-
- return NewWithAttributes(
- semconv.SchemaURL,
- osTypeAttribute,
- ), nil
-}
-
-// Detect returns a *Resource that describes the operating system the
-// service is running on.
-func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
- description, err := osDescription()
- if err != nil {
- return nil, err
- }
-
- return NewWithAttributes(
- semconv.SchemaURL,
- semconv.OSDescription(description),
- ), nil
-}
-
-// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime
-// into an OS type attribute with the corresponding value defined by the semantic
-// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase
-// and used as the value for the returned OS type attribute.
-func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
- // the elements in this map are the intersection between
- // available GOOS values and defined semconv OS types
- osTypeAttributeMap := map[string]attribute.KeyValue{
- "aix": semconv.OSTypeAIX,
- "darwin": semconv.OSTypeDarwin,
- "dragonfly": semconv.OSTypeDragonflyBSD,
- "freebsd": semconv.OSTypeFreeBSD,
- "linux": semconv.OSTypeLinux,
- "netbsd": semconv.OSTypeNetBSD,
- "openbsd": semconv.OSTypeOpenBSD,
- "solaris": semconv.OSTypeSolaris,
- "windows": semconv.OSTypeWindows,
- "zos": semconv.OSTypeZOS,
- }
-
- var osTypeAttribute attribute.KeyValue
-
- if attr, ok := osTypeAttributeMap[osType]; ok {
- osTypeAttribute = attr
- } else {
- osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType))
- }
-
- return osTypeAttribute
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
deleted file mode 100644
index ce455dc54..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "encoding/xml"
- "fmt"
- "io"
- "os"
-)
-
-type plist struct {
- XMLName xml.Name `xml:"plist"`
- Dict dict `xml:"dict"`
-}
-
-type dict struct {
- Key []string `xml:"key"`
- String []string `xml:"string"`
-}
-
-// osRelease builds a string describing the operating system release based on the
-// contents of the property list (.plist) system files. If no .plist files are found,
-// or if the required properties to build the release description string are missing,
-// an empty string is returned instead. The generated string resembles the output of
-// the `sw_vers` commandline program, but in a single-line string. For more information
-// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
-func osRelease() string {
- file, err := getPlistFile()
- if err != nil {
- return ""
- }
-
- defer file.Close()
-
- values, err := parsePlistFile(file)
- if err != nil {
- return ""
- }
-
- return buildOSRelease(values)
-}
-
-// getPlistFile returns a *os.File pointing to one of the well-known .plist files
-// available on macOS. If no file can be opened, it returns an error.
-func getPlistFile() (*os.File, error) {
- return getFirstAvailableFile([]string{
- "/System/Library/CoreServices/SystemVersion.plist",
- "/System/Library/CoreServices/ServerVersion.plist",
- })
-}
-
-// parsePlistFile process the file pointed by `file` as a .plist file and returns
-// a map with the key-values for each pair of correlated <key> and <string> elements
-// contained in it.
-func parsePlistFile(file io.Reader) (map[string]string, error) {
- var v plist
-
- err := xml.NewDecoder(file).Decode(&v)
- if err != nil {
- return nil, err
- }
-
- if len(v.Dict.Key) != len(v.Dict.String) {
- return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match")
- }
-
- properties := make(map[string]string, len(v.Dict.Key))
- for i, key := range v.Dict.Key {
- properties[key] = v.Dict.String[i]
- }
-
- return properties, nil
-}
-
-// buildOSRelease builds a string describing the OS release based on the properties
-// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
-// and `ProductBuildVersion` properties. If some of these properties are not found,
-// it returns an empty string.
-func buildOSRelease(properties map[string]string) string {
- productName := properties["ProductName"]
- productVersion := properties["ProductVersion"]
- productBuildVersion := properties["ProductBuildVersion"]
-
- if productName == "" || productVersion == "" || productBuildVersion == "" {
- return ""
- }
-
- return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion)
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
deleted file mode 100644
index f537e5ca5..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strings"
-)
-
-// osRelease builds a string describing the operating system release based on the
-// properties of the os-release file. If no os-release file is found, or if the
-// required properties to build the release description string are missing, an empty
-// string is returned instead. For more information about os-release files, see:
-// https://www.freedesktop.org/software/systemd/man/os-release.html
-func osRelease() string {
- file, err := getOSReleaseFile()
- if err != nil {
- return ""
- }
-
- defer file.Close()
-
- values := parseOSReleaseFile(file)
-
- return buildOSRelease(values)
-}
-
-// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release
-// files, according to their order of preference. If no file can be opened, it
-// returns an error.
-func getOSReleaseFile() (*os.File, error) {
- return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"})
-}
-
-// parseOSReleaseFile process the file pointed by `file` as an os-release file and
-// returns a map with the key-values contained in it. Empty lines or lines starting
-// with a '#' character are ignored, as well as lines with the missing key=value
-// separator. Values are unquoted and unescaped.
-func parseOSReleaseFile(file io.Reader) map[string]string {
- values := make(map[string]string)
- scanner := bufio.NewScanner(file)
-
- for scanner.Scan() {
- line := scanner.Text()
-
- if skip(line) {
- continue
- }
-
- key, value, ok := parse(line)
- if ok {
- values[key] = value
- }
- }
-
- return values
-}
-
-// skip returns true if the line is blank or starts with a '#' character, and
-// therefore should be skipped from processing.
-func skip(line string) bool {
- line = strings.TrimSpace(line)
-
- return len(line) == 0 || strings.HasPrefix(line, "#")
-}
-
-// parse attempts to split the provided line on the first '=' character, and then
-// sanitize each side of the split before returning them as a key-value pair.
-func parse(line string) (string, string, bool) {
- k, v, found := strings.Cut(line, "=")
-
- if !found || len(k) == 0 {
- return "", "", false
- }
-
- key := strings.TrimSpace(k)
- value := unescape(unquote(strings.TrimSpace(v)))
-
- return key, value, true
-}
-
-// unquote checks whether the string `s` is quoted with double or single quotes
-// and, if so, returns a version of the string without them. Otherwise it returns
-// the provided string unchanged.
-func unquote(s string) string {
- if len(s) < 2 {
- return s
- }
-
- if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] {
- return s[1 : len(s)-1]
- }
-
- return s
-}
-
-// unescape removes the `\` prefix from some characters that are expected
-// to have it added in front of them for escaping purposes.
-func unescape(s string) string {
- return strings.NewReplacer(
- `\$`, `$`,
- `\"`, `"`,
- `\'`, `'`,
- `\\`, `\`,
- "\\`", "`",
- ).Replace(s)
-}
-
-// buildOSRelease builds a string describing the OS release based on the properties
-// available on the provided map. It favors a combination of the `NAME` and `VERSION`
-// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't
-// found), and using `PRETTY_NAME` alone if some of the previous are not present. If
-// none of these properties are found, it returns an empty string.
-//
-// The rationale behind not using `PRETTY_NAME` as first choice was that, for some
-// Linux distributions, it doesn't include the same detail that can be found on the
-// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with
-// other properties can produce "pretty" redundant strings in some cases.
-func buildOSRelease(values map[string]string) string {
- var osRelease string
-
- name := values["NAME"]
- version := values["VERSION"]
-
- if version == "" {
- version = values["VERSION_ID"]
- }
-
- if name != "" && version != "" {
- osRelease = fmt.Sprintf("%s %s", name, version)
- } else {
- osRelease = values["PRETTY_NAME"]
- }
-
- return osRelease
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
deleted file mode 100644
index a6ff26a4d..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "fmt"
- "os"
-
- "golang.org/x/sys/unix"
-)
-
-type unameProvider func(buf *unix.Utsname) (err error)
-
-var defaultUnameProvider unameProvider = unix.Uname
-
-var currentUnameProvider = defaultUnameProvider
-
-func setDefaultUnameProvider() {
- setUnameProvider(defaultUnameProvider)
-}
-
-func setUnameProvider(unameProvider unameProvider) {
- currentUnameProvider = unameProvider
-}
-
-// platformOSDescription returns a human readable OS version information string.
-// The final string combines OS release information (where available) and the
-// result of the `uname` system call.
-func platformOSDescription() (string, error) {
- uname, err := uname()
- if err != nil {
- return "", err
- }
-
- osRelease := osRelease()
- if osRelease != "" {
- return fmt.Sprintf("%s (%s)", osRelease, uname), nil
- }
-
- return uname, nil
-}
-
-// uname issues a uname(2) system call (or equivalent on systems which doesn't
-// have one) and formats the output in a single string, similar to the output
-// of the `uname` commandline program. The final string resembles the one
-// obtained with a call to `uname -snrvm`.
-func uname() (string, error) {
- var utsName unix.Utsname
-
- err := currentUnameProvider(&utsName)
- if err != nil {
- return "", err
- }
-
- return fmt.Sprintf("%s %s %s %s %s",
- unix.ByteSliceToString(utsName.Sysname[:]),
- unix.ByteSliceToString(utsName.Nodename[:]),
- unix.ByteSliceToString(utsName.Release[:]),
- unix.ByteSliceToString(utsName.Version[:]),
- unix.ByteSliceToString(utsName.Machine[:]),
- ), nil
-}
-
-// getFirstAvailableFile returns an *os.File of the first available
-// file from a list of candidate file paths.
-func getFirstAvailableFile(candidates []string) (*os.File, error) {
- for _, c := range candidates {
- file, err := os.Open(c)
- if err == nil {
- return file, nil
- }
- }
-
- return nil, fmt.Errorf("no candidate file available: %v", candidates)
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
deleted file mode 100644
index a77742b07..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-// platformOSDescription is a placeholder implementation for OSes
-// for which this project currently doesn't support os.description
-// attribute detection. See build tags declaration early on this file
-// for a list of unsupported OSes.
-func platformOSDescription() (string, error) {
- return "<unknown>", nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
deleted file mode 100644
index a6a5a53c0..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "fmt"
- "strconv"
-
- "golang.org/x/sys/windows/registry"
-)
-
-// platformOSDescription returns a human readable OS version information string.
-// It does so by querying registry values under the
-// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string
-// resembles the one displayed by the Version Reporter Applet (winver.exe).
-func platformOSDescription() (string, error) {
- k, err := registry.OpenKey(
- registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
- if err != nil {
- return "", err
- }
-
- defer k.Close()
-
- var (
- productName = readProductName(k)
- displayVersion = readDisplayVersion(k)
- releaseID = readReleaseID(k)
- currentMajorVersionNumber = readCurrentMajorVersionNumber(k)
- currentMinorVersionNumber = readCurrentMinorVersionNumber(k)
- currentBuildNumber = readCurrentBuildNumber(k)
- ubr = readUBR(k)
- )
-
- if displayVersion != "" {
- displayVersion += " "
- }
-
- return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]",
- productName,
- displayVersion,
- releaseID,
- currentMajorVersionNumber,
- currentMinorVersionNumber,
- currentBuildNumber,
- ubr,
- ), nil
-}
-
-func getStringValue(name string, k registry.Key) string {
- value, _, _ := k.GetStringValue(name)
-
- return value
-}
-
-func getIntegerValue(name string, k registry.Key) uint64 {
- value, _, _ := k.GetIntegerValue(name)
-
- return value
-}
-
-func readProductName(k registry.Key) string {
- return getStringValue("ProductName", k)
-}
-
-func readDisplayVersion(k registry.Key) string {
- return getStringValue("DisplayVersion", k)
-}
-
-func readReleaseID(k registry.Key) string {
- return getStringValue("ReleaseID", k)
-}
-
-func readCurrentMajorVersionNumber(k registry.Key) string {
- return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10)
-}
-
-func readCurrentMinorVersionNumber(k registry.Key) string {
- return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10)
-}
-
-func readCurrentBuildNumber(k registry.Key) string {
- return getStringValue("CurrentBuildNumber", k)
-}
-
-func readUBR(k registry.Key) string {
- return strconv.FormatUint(getIntegerValue("UBR", k), 10)
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
deleted file mode 100644
index 085fe68fd..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "context"
- "fmt"
- "os"
- "os/user"
- "path/filepath"
- "runtime"
-
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
-)
-
-type (
- pidProvider func() int
- executablePathProvider func() (string, error)
- commandArgsProvider func() []string
- ownerProvider func() (*user.User, error)
- runtimeNameProvider func() string
- runtimeVersionProvider func() string
- runtimeOSProvider func() string
- runtimeArchProvider func() string
-)
-
-var (
- defaultPidProvider pidProvider = os.Getpid
- defaultExecutablePathProvider executablePathProvider = os.Executable
- defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args }
- defaultOwnerProvider ownerProvider = user.Current
- defaultRuntimeNameProvider runtimeNameProvider = func() string {
- if runtime.Compiler == "gc" {
- return "go"
- }
- return runtime.Compiler
- }
- defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version
- defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS }
- defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH }
-)
-
-var (
- pid = defaultPidProvider
- executablePath = defaultExecutablePathProvider
- commandArgs = defaultCommandArgsProvider
- owner = defaultOwnerProvider
- runtimeName = defaultRuntimeNameProvider
- runtimeVersion = defaultRuntimeVersionProvider
- runtimeOS = defaultRuntimeOSProvider
- runtimeArch = defaultRuntimeArchProvider
-)
-
-func setDefaultOSProviders() {
- setOSProviders(
- defaultPidProvider,
- defaultExecutablePathProvider,
- defaultCommandArgsProvider,
- )
-}
-
-func setOSProviders(
- pidProvider pidProvider,
- executablePathProvider executablePathProvider,
- commandArgsProvider commandArgsProvider,
-) {
- pid = pidProvider
- executablePath = executablePathProvider
- commandArgs = commandArgsProvider
-}
-
-func setDefaultRuntimeProviders() {
- setRuntimeProviders(
- defaultRuntimeNameProvider,
- defaultRuntimeVersionProvider,
- defaultRuntimeOSProvider,
- defaultRuntimeArchProvider,
- )
-}
-
-func setRuntimeProviders(
- runtimeNameProvider runtimeNameProvider,
- runtimeVersionProvider runtimeVersionProvider,
- runtimeOSProvider runtimeOSProvider,
- runtimeArchProvider runtimeArchProvider,
-) {
- runtimeName = runtimeNameProvider
- runtimeVersion = runtimeVersionProvider
- runtimeOS = runtimeOSProvider
- runtimeArch = runtimeArchProvider
-}
-
-func setDefaultUserProviders() {
- setUserProviders(defaultOwnerProvider)
-}
-
-func setUserProviders(ownerProvider ownerProvider) {
- owner = ownerProvider
-}
-
-type (
- processPIDDetector struct{}
- processExecutableNameDetector struct{}
- processExecutablePathDetector struct{}
- processCommandArgsDetector struct{}
- processOwnerDetector struct{}
- processRuntimeNameDetector struct{}
- processRuntimeVersionDetector struct{}
- processRuntimeDescriptionDetector struct{}
-)
-
-// Detect returns a *Resource that describes the process identifier (PID) of the
-// executing process.
-func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) {
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil
-}
-
-// Detect returns a *Resource that describes the name of the process executable.
-func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) {
- executableName := filepath.Base(commandArgs()[0])
-
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil
-}
-
-// Detect returns a *Resource that describes the full path of the process executable.
-func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) {
- executablePath, err := executablePath()
- if err != nil {
- return nil, err
- }
-
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil
-}
-
-// Detect returns a *Resource that describes all the command arguments as received
-// by the process.
-func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) {
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil
-}
-
-// Detect returns a *Resource that describes the username of the user that owns the
-// process.
-func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
- owner, err := owner()
- if err != nil {
- return nil, err
- }
-
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil
-}
-
-// Detect returns a *Resource that describes the name of the compiler used to compile
-// this process image.
-func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) {
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil
-}
-
-// Detect returns a *Resource that describes the version of the runtime of this process.
-func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) {
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil
-}
-
-// Detect returns a *Resource that describes the runtime of this process.
-func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
- runtimeDescription := fmt.Sprintf(
- "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())
-
- return NewWithAttributes(
- semconv.SchemaURL,
- semconv.ProcessRuntimeDescription(runtimeDescription),
- ), nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
deleted file mode 100644
index ad4b50df4..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
+++ /dev/null
@@ -1,294 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package resource // import "go.opentelemetry.io/otel/sdk/resource"
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/internal/x"
-)
-
-// Resource describes an entity about which identifying information
-// and metadata is exposed. Resource is an immutable object,
-// equivalent to a map from key to unique value.
-//
-// Resources should be passed and stored as pointers
-// (`*resource.Resource`). The `nil` value is equivalent to an empty
-// Resource.
-type Resource struct {
- attrs attribute.Set
- schemaURL string
-}
-
-var (
- defaultResource *Resource
- defaultResourceOnce sync.Once
-)
-
-// ErrSchemaURLConflict is an error returned when two Resources are merged
-// together that contain different, non-empty, schema URLs.
-var ErrSchemaURLConflict = errors.New("conflicting Schema URL")
-
-// New returns a [Resource] built using opts.
-//
-// This may return a partial Resource along with an error containing
-// [ErrPartialResource] if options that provide a [Detector] are used and that
-// error is returned from one or more of the Detectors. It may also return a
-// merge-conflict Resource along with an error containing
-// [ErrSchemaURLConflict] if merging Resources from the opts results in a
-// schema URL conflict (see [Resource.Merge] for more information). It is up to
-// the caller to determine if this returned Resource should be used or not
-// based on these errors.
-func New(ctx context.Context, opts ...Option) (*Resource, error) {
- cfg := config{}
- for _, opt := range opts {
- cfg = opt.apply(cfg)
- }
-
- r := &Resource{schemaURL: cfg.schemaURL}
- return r, detect(ctx, r, cfg.detectors)
-}
-
-// NewWithAttributes creates a resource from attrs and associates the resource with a
-// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs
-// contains any invalid items those items will be dropped. The attrs are assumed to be
-// in a schema identified by schemaURL.
-func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource {
- resource := NewSchemaless(attrs...)
- resource.schemaURL = schemaURL
- return resource
-}
-
-// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys,
-// the last value will be used. If attrs contains any invalid items those items will
-// be dropped. The resource will not be associated with a schema URL. If the schema
-// of the attrs is known use NewWithAttributes instead.
-func NewSchemaless(attrs ...attribute.KeyValue) *Resource {
- if len(attrs) == 0 {
- return &Resource{}
- }
-
- // Ensure attributes comply with the specification:
- // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute
- s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool {
- return kv.Valid()
- })
-
- // If attrs only contains invalid entries do not allocate a new resource.
- if s.Len() == 0 {
- return &Resource{}
- }
-
- return &Resource{attrs: s} //nolint
-}
-
-// String implements the Stringer interface and provides a
-// human-readable form of the resource.
-//
-// Avoid using this representation as the key in a map of resources,
-// use Equivalent() as the key instead.
-func (r *Resource) String() string {
- if r == nil {
- return ""
- }
- return r.attrs.Encoded(attribute.DefaultEncoder())
-}
-
-// MarshalLog is the marshaling function used by the logging system to represent this Resource.
-func (r *Resource) MarshalLog() interface{} {
- return struct {
- Attributes attribute.Set
- SchemaURL string
- }{
- Attributes: r.attrs,
- SchemaURL: r.schemaURL,
- }
-}
-
-// Attributes returns a copy of attributes from the resource in a sorted order.
-// To avoid allocating a new slice, use an iterator.
-func (r *Resource) Attributes() []attribute.KeyValue {
- if r == nil {
- r = Empty()
- }
- return r.attrs.ToSlice()
-}
-
-// SchemaURL returns the schema URL associated with Resource r.
-func (r *Resource) SchemaURL() string {
- if r == nil {
- return ""
- }
- return r.schemaURL
-}
-
-// Iter returns an iterator of the Resource attributes.
-// This is ideal to use if you do not want a copy of the attributes.
-func (r *Resource) Iter() attribute.Iterator {
- if r == nil {
- r = Empty()
- }
- return r.attrs.Iter()
-}
-
-// Equal returns true when a Resource is equivalent to this Resource.
-func (r *Resource) Equal(eq *Resource) bool {
- if r == nil {
- r = Empty()
- }
- if eq == nil {
- eq = Empty()
- }
- return r.Equivalent() == eq.Equivalent()
-}
-
-// Merge creates a new [Resource] by merging a and b.
-//
-// If there are common keys between a and b, then the value from b will
-// overwrite the value from a, even if b's value is empty.
-//
-// The SchemaURL of the resources will be merged according to the
-// [OpenTelemetry specification rules]:
-//
-// - If a's schema URL is empty then the returned Resource's schema URL will
-// be set to the schema URL of b,
-// - Else if b's schema URL is empty then the returned Resource's schema URL
-// will be set to the schema URL of a,
-// - Else if the schema URLs of a and b are the same then that will be the
-// schema URL of the returned Resource,
-// - Else this is a merging error. If the resources have different,
-// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will
-// be returned with the merged Resource. The merged Resource will have an
-// empty schema URL. It may be the case that some unintended attributes
-// have been overwritten or old semantic conventions persisted in the
-// returned Resource. It is up to the caller to determine if this returned
-// Resource should be used or not.
-//
-// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge
-func Merge(a, b *Resource) (*Resource, error) {
- if a == nil && b == nil {
- return Empty(), nil
- }
- if a == nil {
- return b, nil
- }
- if b == nil {
- return a, nil
- }
-
- // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key()
- // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...)
- mi := attribute.NewMergeIterator(b.Set(), a.Set())
- combine := make([]attribute.KeyValue, 0, a.Len()+b.Len())
- for mi.Next() {
- combine = append(combine, mi.Attribute())
- }
-
- switch {
- case a.schemaURL == "":
- return NewWithAttributes(b.schemaURL, combine...), nil
- case b.schemaURL == "":
- return NewWithAttributes(a.schemaURL, combine...), nil
- case a.schemaURL == b.schemaURL:
- return NewWithAttributes(a.schemaURL, combine...), nil
- }
- // Return the merged resource with an appropriate error. It is up to
- // the user to decide if the returned resource can be used or not.
- return NewSchemaless(combine...), fmt.Errorf(
- "%w: %s and %s",
- ErrSchemaURLConflict,
- a.schemaURL,
- b.schemaURL,
- )
-}
-
-// Empty returns an instance of Resource with no attributes. It is
-// equivalent to a `nil` Resource.
-func Empty() *Resource {
- return &Resource{}
-}
-
-// Default returns an instance of Resource with a default
-// "service.name" and OpenTelemetrySDK attributes.
-func Default() *Resource {
- defaultResourceOnce.Do(func() {
- var err error
- defaultDetectors := []Detector{
- defaultServiceNameDetector{},
- fromEnv{},
- telemetrySDK{},
- }
- if x.Resource.Enabled() {
- defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...)
- }
- defaultResource, err = Detect(
- context.Background(),
- defaultDetectors...,
- )
- if err != nil {
- otel.Handle(err)
- }
- // If Detect did not return a valid resource, fall back to emptyResource.
- if defaultResource == nil {
- defaultResource = &Resource{}
- }
- })
- return defaultResource
-}
-
-// Environment returns an instance of Resource with attributes
-// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable.
-func Environment() *Resource {
- detector := &fromEnv{}
- resource, err := detector.Detect(context.Background())
- if err != nil {
- otel.Handle(err)
- }
- return resource
-}
-
-// Equivalent returns an object that can be compared for equality
-// between two resources. This value is suitable for use as a key in
-// a map.
-func (r *Resource) Equivalent() attribute.Distinct {
- return r.Set().Equivalent()
-}
-
-// Set returns the equivalent *attribute.Set of this resource's attributes.
-func (r *Resource) Set() *attribute.Set {
- if r == nil {
- r = Empty()
- }
- return &r.attrs
-}
-
-// MarshalJSON encodes the resource attributes as a JSON list of { "Key":
-// "...", "Value": ... } pairs in order sorted by key.
-func (r *Resource) MarshalJSON() ([]byte, error) {
- if r == nil {
- r = Empty()
- }
- return r.attrs.MarshalJSON()
-}
-
-// Len returns the number of unique key-values in this Resource.
-func (r *Resource) Len() int {
- if r == nil {
- return 0
- }
- return r.attrs.Len()
-}
-
-// Encoded returns an encoded representation of the resource.
-func (r *Resource) Encoded(enc attribute.Encoder) string {
- if r == nil {
- return ""
- }
- return r.attrs.Encoded(enc)
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md
deleted file mode 100644
index f2936e143..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# SDK Trace
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
deleted file mode 100644
index ccc97e1b6..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
+++ /dev/null
@@ -1,414 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "context"
- "sync"
- "sync/atomic"
- "time"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/sdk/internal/env"
- "go.opentelemetry.io/otel/trace"
-)
-
-// Defaults for BatchSpanProcessorOptions.
-const (
- DefaultMaxQueueSize = 2048
- DefaultScheduleDelay = 5000
- DefaultExportTimeout = 30000
- DefaultMaxExportBatchSize = 512
-)
-
-// BatchSpanProcessorOption configures a BatchSpanProcessor.
-type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
-
-// BatchSpanProcessorOptions is configuration settings for a
-// BatchSpanProcessor.
-type BatchSpanProcessorOptions struct {
- // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the
- // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior.
- // The default value of MaxQueueSize is 2048.
- MaxQueueSize int
-
- // BatchTimeout is the maximum duration for constructing a batch. Processor
- // forcefully sends available spans when timeout is reached.
- // The default value of BatchTimeout is 5000 msec.
- BatchTimeout time.Duration
-
- // ExportTimeout specifies the maximum duration for exporting spans. If the timeout
- // is reached, the export will be cancelled.
- // The default value of ExportTimeout is 30000 msec.
- ExportTimeout time.Duration
-
- // MaxExportBatchSize is the maximum number of spans to process in a single batch.
- // If there are more than one batch worth of spans then it processes multiple batches
- // of spans one batch after the other without any delay.
- // The default value of MaxExportBatchSize is 512.
- MaxExportBatchSize int
-
- // BlockOnQueueFull blocks onEnd() and onStart() method if the queue is full
- // AND if BlockOnQueueFull is set to true.
- // Blocking option should be used carefully as it can severely affect the performance of an
- // application.
- BlockOnQueueFull bool
-}
-
-// batchSpanProcessor is a SpanProcessor that batches asynchronously-received
-// spans and sends them to a trace.Exporter when complete.
-type batchSpanProcessor struct {
- e SpanExporter
- o BatchSpanProcessorOptions
-
- queue chan ReadOnlySpan
- dropped uint32
-
- batch []ReadOnlySpan
- batchMutex sync.Mutex
- timer *time.Timer
- stopWait sync.WaitGroup
- stopOnce sync.Once
- stopCh chan struct{}
- stopped atomic.Bool
-}
-
-var _ SpanProcessor = (*batchSpanProcessor)(nil)
-
-// NewBatchSpanProcessor creates a new SpanProcessor that will send completed
-// span batches to the exporter with the supplied options.
-//
-// If the exporter is nil, the span processor will perform no action.
-func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor {
- maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize)
- maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
-
- if maxExportBatchSize > maxQueueSize {
- if DefaultMaxExportBatchSize > maxQueueSize {
- maxExportBatchSize = maxQueueSize
- } else {
- maxExportBatchSize = DefaultMaxExportBatchSize
- }
- }
-
- o := BatchSpanProcessorOptions{
- BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond,
- ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond,
- MaxQueueSize: maxQueueSize,
- MaxExportBatchSize: maxExportBatchSize,
- }
- for _, opt := range options {
- opt(&o)
- }
- bsp := &batchSpanProcessor{
- e: exporter,
- o: o,
- batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize),
- timer: time.NewTimer(o.BatchTimeout),
- queue: make(chan ReadOnlySpan, o.MaxQueueSize),
- stopCh: make(chan struct{}),
- }
-
- bsp.stopWait.Add(1)
- go func() {
- defer bsp.stopWait.Done()
- bsp.processQueue()
- bsp.drainQueue()
- }()
-
- return bsp
-}
-
-// OnStart method does nothing.
-func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {}
-
-// OnEnd method enqueues a ReadOnlySpan for later processing.
-func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
- // Do not enqueue spans after Shutdown.
- if bsp.stopped.Load() {
- return
- }
-
- // Do not enqueue spans if we are just going to drop them.
- if bsp.e == nil {
- return
- }
- bsp.enqueue(s)
-}
-
-// Shutdown flushes the queue and waits until all spans are processed.
-// It only executes once. Subsequent call does nothing.
-func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
- var err error
- bsp.stopOnce.Do(func() {
- bsp.stopped.Store(true)
- wait := make(chan struct{})
- go func() {
- close(bsp.stopCh)
- bsp.stopWait.Wait()
- if bsp.e != nil {
- if err := bsp.e.Shutdown(ctx); err != nil {
- otel.Handle(err)
- }
- }
- close(wait)
- }()
- // Wait until the wait group is done or the context is cancelled
- select {
- case <-wait:
- case <-ctx.Done():
- err = ctx.Err()
- }
- })
- return err
-}
-
-type forceFlushSpan struct {
- ReadOnlySpan
- flushed chan struct{}
-}
-
-func (f forceFlushSpan) SpanContext() trace.SpanContext {
- return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
-}
-
-// ForceFlush exports all ended spans that have not yet been exported.
-func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
- // Interrupt if context is already canceled.
- if err := ctx.Err(); err != nil {
- return err
- }
-
- // Do nothing after Shutdown.
- if bsp.stopped.Load() {
- return nil
- }
-
- var err error
- if bsp.e != nil {
- flushCh := make(chan struct{})
- if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) {
- select {
- case <-bsp.stopCh:
- // The batchSpanProcessor is Shutdown.
- return nil
- case <-flushCh:
- // Processed any items in queue prior to ForceFlush being called
- case <-ctx.Done():
- return ctx.Err()
- }
- }
-
- wait := make(chan error)
- go func() {
- wait <- bsp.exportSpans(ctx)
- close(wait)
- }()
- // Wait until the export is finished or the context is cancelled/timed out
- select {
- case err = <-wait:
- case <-ctx.Done():
- err = ctx.Err()
- }
- }
- return err
-}
-
-// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the
-// maximum queue size allowed for a BatchSpanProcessor.
-func WithMaxQueueSize(size int) BatchSpanProcessorOption {
- return func(o *BatchSpanProcessorOptions) {
- o.MaxQueueSize = size
- }
-}
-
-// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures
-// the maximum export batch size allowed for a BatchSpanProcessor.
-func WithMaxExportBatchSize(size int) BatchSpanProcessorOption {
- return func(o *BatchSpanProcessorOptions) {
- o.MaxExportBatchSize = size
- }
-}
-
-// WithBatchTimeout returns a BatchSpanProcessorOption that configures the
-// maximum delay allowed for a BatchSpanProcessor before it will export any
-// held span (whether the queue is full or not).
-func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption {
- return func(o *BatchSpanProcessorOptions) {
- o.BatchTimeout = delay
- }
-}
-
-// WithExportTimeout returns a BatchSpanProcessorOption that configures the
-// amount of time a BatchSpanProcessor waits for an exporter to export before
-// abandoning the export.
-func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption {
- return func(o *BatchSpanProcessorOptions) {
- o.ExportTimeout = timeout
- }
-}
-
-// WithBlocking returns a BatchSpanProcessorOption that configures a
-// BatchSpanProcessor to wait for enqueue operations to succeed instead of
-// dropping data when the queue is full.
-func WithBlocking() BatchSpanProcessorOption {
- return func(o *BatchSpanProcessorOptions) {
- o.BlockOnQueueFull = true
- }
-}
-
-// exportSpans is a subroutine of processing and draining the queue.
-func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
- bsp.timer.Reset(bsp.o.BatchTimeout)
-
- bsp.batchMutex.Lock()
- defer bsp.batchMutex.Unlock()
-
- if bsp.o.ExportTimeout > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout)
- defer cancel()
- }
-
- if l := len(bsp.batch); l > 0 {
- global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
- err := bsp.e.ExportSpans(ctx, bsp.batch)
-
- // A new batch is always created after exporting, even if the batch failed to be exported.
- //
- // It is up to the exporter to implement any type of retry logic if a batch is failing
- // to be exported, since it is specific to the protocol and backend being sent to.
- clear(bsp.batch) // Erase elements to let GC collect objects
- bsp.batch = bsp.batch[:0]
-
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// processQueue removes spans from the `queue` channel until processor
-// is shut down. It calls the exporter in batches of up to MaxExportBatchSize
-// waiting up to BatchTimeout to form a batch.
-func (bsp *batchSpanProcessor) processQueue() {
- defer bsp.timer.Stop()
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- for {
- select {
- case <-bsp.stopCh:
- return
- case <-bsp.timer.C:
- if err := bsp.exportSpans(ctx); err != nil {
- otel.Handle(err)
- }
- case sd := <-bsp.queue:
- if ffs, ok := sd.(forceFlushSpan); ok {
- close(ffs.flushed)
- continue
- }
- bsp.batchMutex.Lock()
- bsp.batch = append(bsp.batch, sd)
- shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize
- bsp.batchMutex.Unlock()
- if shouldExport {
- if !bsp.timer.Stop() {
- // Handle both GODEBUG=asynctimerchan=[0|1] properly.
- select {
- case <-bsp.timer.C:
- default:
- }
- }
- if err := bsp.exportSpans(ctx); err != nil {
- otel.Handle(err)
- }
- }
- }
- }
-}
-
-// drainQueue awaits the any caller that had added to bsp.stopWait
-// to finish the enqueue, then exports the final batch.
-func (bsp *batchSpanProcessor) drainQueue() {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- for {
- select {
- case sd := <-bsp.queue:
- if _, ok := sd.(forceFlushSpan); ok {
- // Ignore flush requests as they are not valid spans.
- continue
- }
-
- bsp.batchMutex.Lock()
- bsp.batch = append(bsp.batch, sd)
- shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize
- bsp.batchMutex.Unlock()
-
- if shouldExport {
- if err := bsp.exportSpans(ctx); err != nil {
- otel.Handle(err)
- }
- }
- default:
- // There are no more enqueued spans. Make final export.
- if err := bsp.exportSpans(ctx); err != nil {
- otel.Handle(err)
- }
- return
- }
- }
-}
-
-func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
- ctx := context.TODO()
- if bsp.o.BlockOnQueueFull {
- bsp.enqueueBlockOnQueueFull(ctx, sd)
- } else {
- bsp.enqueueDrop(ctx, sd)
- }
-}
-
-func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool {
- if !sd.SpanContext().IsSampled() {
- return false
- }
-
- select {
- case bsp.queue <- sd:
- return true
- case <-ctx.Done():
- return false
- }
-}
-
-func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool {
- if !sd.SpanContext().IsSampled() {
- return false
- }
-
- select {
- case bsp.queue <- sd:
- return true
- default:
- atomic.AddUint32(&bsp.dropped, 1)
- }
- return false
-}
-
-// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
-func (bsp *batchSpanProcessor) MarshalLog() interface{} {
- return struct {
- Type string
- SpanExporter SpanExporter
- Config BatchSpanProcessorOptions
- }{
- Type: "BatchSpanProcessor",
- SpanExporter: bsp.e,
- Config: bsp.o,
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
deleted file mode 100644
index 1f60524e3..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package trace contains support for OpenTelemetry distributed tracing.
-
-The following assumes a basic familiarity with OpenTelemetry concepts.
-See https://opentelemetry.io.
-*/
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
deleted file mode 100644
index 60a7ed134..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "time"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// Event is a thing that happened during a Span's lifetime.
-type Event struct {
- // Name is the name of this event
- Name string
-
- // Attributes describe the aspects of the event.
- Attributes []attribute.KeyValue
-
- // DroppedAttributeCount is the number of attributes that were not
- // recorded due to configured limits being reached.
- DroppedAttributeCount int
-
- // Time at which this event was recorded.
- Time time.Time
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
deleted file mode 100644
index 8c308dd60..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "slices"
- "sync"
-
- "go.opentelemetry.io/otel/internal/global"
-)
-
-// evictedQueue is a FIFO queue with a configurable capacity.
-type evictedQueue[T any] struct {
- queue []T
- capacity int
- droppedCount int
- logDroppedMsg string
- logDroppedOnce sync.Once
-}
-
-func newEvictedQueueEvent(capacity int) evictedQueue[Event] {
- // Do not pre-allocate queue, do this lazily.
- return evictedQueue[Event]{
- capacity: capacity,
- logDroppedMsg: "limit reached: dropping trace trace.Event",
- }
-}
-
-func newEvictedQueueLink(capacity int) evictedQueue[Link] {
- // Do not pre-allocate queue, do this lazily.
- return evictedQueue[Link]{
- capacity: capacity,
- logDroppedMsg: "limit reached: dropping trace trace.Link",
- }
-}
-
-// add adds value to the evictedQueue eq. If eq is at capacity, the oldest
-// queued value will be discarded and the drop count incremented.
-func (eq *evictedQueue[T]) add(value T) {
- if eq.capacity == 0 {
- eq.droppedCount++
- eq.logDropped()
- return
- }
-
- if eq.capacity > 0 && len(eq.queue) == eq.capacity {
- // Drop first-in while avoiding allocating more capacity to eq.queue.
- copy(eq.queue[:eq.capacity-1], eq.queue[1:])
- eq.queue = eq.queue[:eq.capacity-1]
- eq.droppedCount++
- eq.logDropped()
- }
- eq.queue = append(eq.queue, value)
-}
-
-func (eq *evictedQueue[T]) logDropped() {
- eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) })
-}
-
-// copy returns a copy of the evictedQueue.
-func (eq *evictedQueue[T]) copy() []T {
- return slices.Clone(eq.queue)
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
deleted file mode 100644
index 925bcf993..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "context"
- crand "crypto/rand"
- "encoding/binary"
- "math/rand"
- "sync"
-
- "go.opentelemetry.io/otel/trace"
-)
-
-// IDGenerator allows custom generators for TraceID and SpanID.
-type IDGenerator interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // NewIDs returns a new trace and span ID.
- NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID)
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // NewSpanID returns a ID for a new span in the trace with traceID.
- NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
-
-type randomIDGenerator struct {
- sync.Mutex
- randSource *rand.Rand
-}
-
-var _ IDGenerator = &randomIDGenerator{}
-
-// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
-func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
- gen.Lock()
- defer gen.Unlock()
- sid := trace.SpanID{}
- for {
- _, _ = gen.randSource.Read(sid[:])
- if sid.IsValid() {
- break
- }
- }
- return sid
-}
-
-// NewIDs returns a non-zero trace ID and a non-zero span ID from a
-// randomly-chosen sequence.
-func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
- gen.Lock()
- defer gen.Unlock()
- tid := trace.TraceID{}
- sid := trace.SpanID{}
- for {
- _, _ = gen.randSource.Read(tid[:])
- if tid.IsValid() {
- break
- }
- }
- for {
- _, _ = gen.randSource.Read(sid[:])
- if sid.IsValid() {
- break
- }
- }
- return tid, sid
-}
-
-func defaultIDGenerator() IDGenerator {
- gen := &randomIDGenerator{}
- var rngSeed int64
- _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed)
- gen.randSource = rand.New(rand.NewSource(rngSeed))
- return gen
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
deleted file mode 100644
index c03bdc90f..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-// Link is the relationship between two Spans. The relationship can be within
-// the same Trace or across different Traces.
-type Link struct {
- // SpanContext of the linked Span.
- SpanContext trace.SpanContext
-
- // Attributes describe the aspects of the link.
- Attributes []attribute.KeyValue
-
- // DroppedAttributeCount is the number of attributes that were not
- // recorded due to configured limits being reached.
- DroppedAttributeCount int
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
deleted file mode 100644
index 185aa7c08..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
+++ /dev/null
@@ -1,494 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "context"
- "fmt"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/sdk/instrumentation"
- "go.opentelemetry.io/otel/sdk/resource"
- "go.opentelemetry.io/otel/trace"
- "go.opentelemetry.io/otel/trace/embedded"
- "go.opentelemetry.io/otel/trace/noop"
-)
-
-const (
- defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
-)
-
-// tracerProviderConfig.
-type tracerProviderConfig struct {
- // processors contains collection of SpanProcessors that are processing pipeline
- // for spans in the trace signal.
- // SpanProcessors registered with a TracerProvider and are called at the start
- // and end of a Span's lifecycle, and are called in the order they are
- // registered.
- processors []SpanProcessor
-
- // sampler is the default sampler used when creating new spans.
- sampler Sampler
-
- // idGenerator is used to generate all Span and Trace IDs when needed.
- idGenerator IDGenerator
-
- // spanLimits defines the attribute, event, and link limits for spans.
- spanLimits SpanLimits
-
- // resource contains attributes representing an entity that produces telemetry.
- resource *resource.Resource
-}
-
-// MarshalLog is the marshaling function used by the logging system to represent this Provider.
-func (cfg tracerProviderConfig) MarshalLog() interface{} {
- return struct {
- SpanProcessors []SpanProcessor
- SamplerType string
- IDGeneratorType string
- SpanLimits SpanLimits
- Resource *resource.Resource
- }{
- SpanProcessors: cfg.processors,
- SamplerType: fmt.Sprintf("%T", cfg.sampler),
- IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator),
- SpanLimits: cfg.spanLimits,
- Resource: cfg.resource,
- }
-}
-
-// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
-// instrumentation so it can trace operational flow through a system.
-type TracerProvider struct {
- embedded.TracerProvider
-
- mu sync.Mutex
- namedTracer map[instrumentation.Scope]*tracer
- spanProcessors atomic.Pointer[spanProcessorStates]
-
- isShutdown atomic.Bool
-
- // These fields are not protected by the lock mu. They are assumed to be
- // immutable after creation of the TracerProvider.
- sampler Sampler
- idGenerator IDGenerator
- spanLimits SpanLimits
- resource *resource.Resource
-}
-
-var _ trace.TracerProvider = &TracerProvider{}
-
-// NewTracerProvider returns a new and configured TracerProvider.
-//
-// By default the returned TracerProvider is configured with:
-// - a ParentBased(AlwaysSample) Sampler
-// - a random number IDGenerator
-// - the resource.Default() Resource
-// - the default SpanLimits.
-//
-// The passed opts are used to override these default values and configure the
-// returned TracerProvider appropriately.
-func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
- o := tracerProviderConfig{
- spanLimits: NewSpanLimits(),
- }
- o = applyTracerProviderEnvConfigs(o)
-
- for _, opt := range opts {
- o = opt.apply(o)
- }
-
- o = ensureValidTracerProviderConfig(o)
-
- tp := &TracerProvider{
- namedTracer: make(map[instrumentation.Scope]*tracer),
- sampler: o.sampler,
- idGenerator: o.idGenerator,
- spanLimits: o.spanLimits,
- resource: o.resource,
- }
- global.Info("TracerProvider created", "config", o)
-
- spss := make(spanProcessorStates, 0, len(o.processors))
- for _, sp := range o.processors {
- spss = append(spss, newSpanProcessorState(sp))
- }
- tp.spanProcessors.Store(&spss)
-
- return tp
-}
-
-// Tracer returns a Tracer with the given name and options. If a Tracer for
-// the given name and options does not exist it is created, otherwise the
-// existing Tracer is returned.
-//
-// If name is empty, DefaultTracerName is used instead.
-//
-// This method is safe to be called concurrently.
-func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
- // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown().
- if p.isShutdown.Load() {
- return noop.NewTracerProvider().Tracer(name, opts...)
- }
- c := trace.NewTracerConfig(opts...)
- if name == "" {
- name = defaultTracerName
- }
- is := instrumentation.Scope{
- Name: name,
- Version: c.InstrumentationVersion(),
- SchemaURL: c.SchemaURL(),
- Attributes: c.InstrumentationAttributes(),
- }
-
- t, ok := func() (trace.Tracer, bool) {
- p.mu.Lock()
- defer p.mu.Unlock()
- // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran
- // after the first check above but before we acquired the mutex.
- if p.isShutdown.Load() {
- return noop.NewTracerProvider().Tracer(name, opts...), true
- }
- t, ok := p.namedTracer[is]
- if !ok {
- t = &tracer{
- provider: p,
- instrumentationScope: is,
- }
- p.namedTracer[is] = t
- }
- return t, ok
- }()
- if !ok {
- // This code is outside the mutex to not hold the lock while calling third party logging code:
- // - That code may do slow things like I/O, which would prolong the duration the lock is held,
- // slowing down all tracing consumers.
- // - Logging code may be instrumented with tracing and deadlock because it could try
- // acquiring the same non-reentrant mutex.
- global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes)
- }
- return t
-}
-
-// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
-func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
- // This check prevents calls during a shutdown.
- if p.isShutdown.Load() {
- return
- }
- p.mu.Lock()
- defer p.mu.Unlock()
- // This check prevents calls after a shutdown.
- if p.isShutdown.Load() {
- return
- }
-
- current := p.getSpanProcessors()
- newSPS := make(spanProcessorStates, 0, len(current)+1)
- newSPS = append(newSPS, current...)
- newSPS = append(newSPS, newSpanProcessorState(sp))
- p.spanProcessors.Store(&newSPS)
-}
-
-// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors.
-func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) {
- // This check prevents calls during a shutdown.
- if p.isShutdown.Load() {
- return
- }
- p.mu.Lock()
- defer p.mu.Unlock()
- // This check prevents calls after a shutdown.
- if p.isShutdown.Load() {
- return
- }
- old := p.getSpanProcessors()
- if len(old) == 0 {
- return
- }
- spss := make(spanProcessorStates, len(old))
- copy(spss, old)
-
- // stop the span processor if it is started and remove it from the list
- var stopOnce *spanProcessorState
- var idx int
- for i, sps := range spss {
- if sps.sp == sp {
- stopOnce = sps
- idx = i
- }
- }
- if stopOnce != nil {
- stopOnce.state.Do(func() {
- if err := sp.Shutdown(context.Background()); err != nil {
- otel.Handle(err)
- }
- })
- }
- if len(spss) > 1 {
- copy(spss[idx:], spss[idx+1:])
- }
- spss[len(spss)-1] = nil
- spss = spss[:len(spss)-1]
-
- p.spanProcessors.Store(&spss)
-}
-
-// ForceFlush immediately exports all spans that have not yet been exported for
-// all the registered span processors.
-func (p *TracerProvider) ForceFlush(ctx context.Context) error {
- spss := p.getSpanProcessors()
- if len(spss) == 0 {
- return nil
- }
-
- for _, sps := range spss {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- if err := sps.sp.ForceFlush(ctx); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Shutdown shuts down TracerProvider. All registered span processors are shut down
-// in the order they were registered and any held computational resources are released.
-// After Shutdown is called, all methods are no-ops.
-func (p *TracerProvider) Shutdown(ctx context.Context) error {
- // This check prevents deadlocks in case of recursive shutdown.
- if p.isShutdown.Load() {
- return nil
- }
- p.mu.Lock()
- defer p.mu.Unlock()
- // This check prevents calls after a shutdown has already been done concurrently.
- if !p.isShutdown.CompareAndSwap(false, true) { // did toggle?
- return nil
- }
-
- var retErr error
- for _, sps := range p.getSpanProcessors() {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- var err error
- sps.state.Do(func() {
- err = sps.sp.Shutdown(ctx)
- })
- if err != nil {
- if retErr == nil {
- retErr = err
- } else {
- // Poor man's list of errors
- retErr = fmt.Errorf("%w; %w", retErr, err)
- }
- }
- }
- p.spanProcessors.Store(&spanProcessorStates{})
- return retErr
-}
-
-func (p *TracerProvider) getSpanProcessors() spanProcessorStates {
- return *(p.spanProcessors.Load())
-}
-
-// TracerProviderOption configures a TracerProvider.
-type TracerProviderOption interface {
- apply(tracerProviderConfig) tracerProviderConfig
-}
-
-type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig
-
-func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig {
- return fn(cfg)
-}
-
-// WithSyncer registers the exporter with the TracerProvider using a
-// SimpleSpanProcessor.
-//
-// This is not recommended for production use. The synchronous nature of the
-// SimpleSpanProcessor that will wrap the exporter make it good for testing,
-// debugging, or showing examples of other feature, but it will be slow and
-// have a high computation resource usage overhead. The WithBatcher option is
-// recommended for production use instead.
-func WithSyncer(e SpanExporter) TracerProviderOption {
- return WithSpanProcessor(NewSimpleSpanProcessor(e))
-}
-
-// WithBatcher registers the exporter with the TracerProvider using a
-// BatchSpanProcessor configured with the passed opts.
-func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption {
- return WithSpanProcessor(NewBatchSpanProcessor(e, opts...))
-}
-
-// WithSpanProcessor registers the SpanProcessor with a TracerProvider.
-func WithSpanProcessor(sp SpanProcessor) TracerProviderOption {
- return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
- cfg.processors = append(cfg.processors, sp)
- return cfg
- })
-}
-
-// WithResource returns a TracerProviderOption that will configure the
-// Resource r as a TracerProvider's Resource. The configured Resource is
-// referenced by all the Tracers the TracerProvider creates. It represents the
-// entity producing telemetry.
-//
-// If this option is not used, the TracerProvider will use the
-// resource.Default() Resource by default.
-func WithResource(r *resource.Resource) TracerProviderOption {
- return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
- var err error
- cfg.resource, err = resource.Merge(resource.Environment(), r)
- if err != nil {
- otel.Handle(err)
- }
- return cfg
- })
-}
-
-// WithIDGenerator returns a TracerProviderOption that will configure the
-// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator
-// is used by the Tracers the TracerProvider creates to generate new Span and
-// Trace IDs.
-//
-// If this option is not used, the TracerProvider will use a random number
-// IDGenerator by default.
-func WithIDGenerator(g IDGenerator) TracerProviderOption {
- return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
- if g != nil {
- cfg.idGenerator = g
- }
- return cfg
- })
-}
-
-// WithSampler returns a TracerProviderOption that will configure the Sampler
-// s as a TracerProvider's Sampler. The configured Sampler is used by the
-// Tracers the TracerProvider creates to make their sampling decisions for the
-// Spans they create.
-//
-// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER
-// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used
-// and the sampler is not configured through environment variables or the environment
-// contains invalid/unsupported configuration, the TracerProvider will use a
-// ParentBased(AlwaysSample) Sampler by default.
-func WithSampler(s Sampler) TracerProviderOption {
- return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
- if s != nil {
- cfg.sampler = s
- }
- return cfg
- })
-}
-
-// WithSpanLimits returns a TracerProviderOption that configures a
-// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span
-// created by a Tracer from the TracerProvider.
-//
-// If any field of sl is zero or negative it will be replaced with the default
-// value for that field.
-//
-// If this or WithRawSpanLimits are not provided, the TracerProvider will use
-// the limits defined by environment variables, or the defaults if unset.
-// Refer to the NewSpanLimits documentation for information about this
-// relationship.
-//
-// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited
-// and zero limits. This option will be kept until the next major version
-// incremented release.
-func WithSpanLimits(sl SpanLimits) TracerProviderOption {
- if sl.AttributeValueLengthLimit <= 0 {
- sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit
- }
- if sl.AttributeCountLimit <= 0 {
- sl.AttributeCountLimit = DefaultAttributeCountLimit
- }
- if sl.EventCountLimit <= 0 {
- sl.EventCountLimit = DefaultEventCountLimit
- }
- if sl.AttributePerEventCountLimit <= 0 {
- sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit
- }
- if sl.LinkCountLimit <= 0 {
- sl.LinkCountLimit = DefaultLinkCountLimit
- }
- if sl.AttributePerLinkCountLimit <= 0 {
- sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit
- }
- return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
- cfg.spanLimits = sl
- return cfg
- })
-}
-
-// WithRawSpanLimits returns a TracerProviderOption that configures a
-// TracerProvider to use these limits. These limits bound any Span created by
-// a Tracer from the TracerProvider.
-//
-// The limits will be used as-is. Zero or negative values will not be changed
-// to the default value like WithSpanLimits does. Setting a limit to zero will
-// effectively disable the related resource it limits and setting to a
-// negative value will mean that resource is unlimited. Consequentially, this
-// means that the zero-value SpanLimits will disable all span resources.
-// Because of this, limits should be constructed using NewSpanLimits and
-// updated accordingly.
-//
-// If this or WithSpanLimits are not provided, the TracerProvider will use the
-// limits defined by environment variables, or the defaults if unset. Refer to
-// the NewSpanLimits documentation for information about this relationship.
-func WithRawSpanLimits(limits SpanLimits) TracerProviderOption {
- return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
- cfg.spanLimits = limits
- return cfg
- })
-}
-
-func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig {
- for _, opt := range tracerProviderOptionsFromEnv() {
- cfg = opt.apply(cfg)
- }
-
- return cfg
-}
-
-func tracerProviderOptionsFromEnv() []TracerProviderOption {
- var opts []TracerProviderOption
-
- sampler, err := samplerFromEnv()
- if err != nil {
- otel.Handle(err)
- }
-
- if sampler != nil {
- opts = append(opts, WithSampler(sampler))
- }
-
- return opts
-}
-
-// ensureValidTracerProviderConfig ensures that given TracerProviderConfig is valid.
-func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig {
- if cfg.sampler == nil {
- cfg.sampler = ParentBased(AlwaysSample())
- }
- if cfg.idGenerator == nil {
- cfg.idGenerator = defaultIDGenerator()
- }
- if cfg.resource == nil {
- cfg.resource = resource.Default()
- }
- return cfg
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
deleted file mode 100644
index 9b672a1d7..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "errors"
- "os"
- "strconv"
- "strings"
-)
-
-const (
- tracesSamplerKey = "OTEL_TRACES_SAMPLER"
- tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG"
-
- samplerAlwaysOn = "always_on"
- samplerAlwaysOff = "always_off"
- samplerTraceIDRatio = "traceidratio"
- samplerParentBasedAlwaysOn = "parentbased_always_on"
- samplerParsedBasedAlwaysOff = "parentbased_always_off"
- samplerParentBasedTraceIDRatio = "parentbased_traceidratio"
-)
-
-type errUnsupportedSampler string
-
-func (e errUnsupportedSampler) Error() string {
- return "unsupported sampler: " + string(e)
-}
-
-var (
- errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0")
- errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0")
-)
-
-type samplerArgParseError struct {
- parseErr error
-}
-
-func (e samplerArgParseError) Error() string {
- return "parsing sampler argument: " + e.parseErr.Error()
-}
-
-func (e samplerArgParseError) Unwrap() error {
- return e.parseErr
-}
-
-func samplerFromEnv() (Sampler, error) {
- sampler, ok := os.LookupEnv(tracesSamplerKey)
- if !ok {
- return nil, nil
- }
-
- sampler = strings.ToLower(strings.TrimSpace(sampler))
- samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey)
- samplerArg = strings.TrimSpace(samplerArg)
-
- switch sampler {
- case samplerAlwaysOn:
- return AlwaysSample(), nil
- case samplerAlwaysOff:
- return NeverSample(), nil
- case samplerTraceIDRatio:
- if !hasSamplerArg {
- return TraceIDRatioBased(1.0), nil
- }
- return parseTraceIDRatio(samplerArg)
- case samplerParentBasedAlwaysOn:
- return ParentBased(AlwaysSample()), nil
- case samplerParsedBasedAlwaysOff:
- return ParentBased(NeverSample()), nil
- case samplerParentBasedTraceIDRatio:
- if !hasSamplerArg {
- return ParentBased(TraceIDRatioBased(1.0)), nil
- }
- ratio, err := parseTraceIDRatio(samplerArg)
- return ParentBased(ratio), err
- default:
- return nil, errUnsupportedSampler(sampler)
- }
-}
-
-func parseTraceIDRatio(arg string) (Sampler, error) {
- v, err := strconv.ParseFloat(arg, 64)
- if err != nil {
- return TraceIDRatioBased(1.0), samplerArgParseError{err}
- }
- if v < 0.0 {
- return TraceIDRatioBased(1.0), errNegativeTraceIDRatio
- }
- if v > 1.0 {
- return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio
- }
-
- return TraceIDRatioBased(v), nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
deleted file mode 100644
index ebb6df6c9..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "context"
- "encoding/binary"
- "fmt"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-// Sampler decides whether a trace should be sampled and exported.
-type Sampler interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // ShouldSample returns a SamplingResult based on a decision made from the
- // passed parameters.
- ShouldSample(parameters SamplingParameters) SamplingResult
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Description returns information describing the Sampler.
- Description() string
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
-
-// SamplingParameters contains the values passed to a Sampler.
-type SamplingParameters struct {
- ParentContext context.Context
- TraceID trace.TraceID
- Name string
- Kind trace.SpanKind
- Attributes []attribute.KeyValue
- Links []trace.Link
-}
-
-// SamplingDecision indicates whether a span is dropped, recorded and/or sampled.
-type SamplingDecision uint8
-
-// Valid sampling decisions.
-const (
- // Drop will not record the span and all attributes/events will be dropped.
- Drop SamplingDecision = iota
-
- // Record indicates the span's `IsRecording() == true`, but `Sampled` flag
- // *must not* be set.
- RecordOnly
-
- // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag
- // *must* be set.
- RecordAndSample
-)
-
-// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate.
-type SamplingResult struct {
- Decision SamplingDecision
- Attributes []attribute.KeyValue
- Tracestate trace.TraceState
-}
-
-type traceIDRatioSampler struct {
- traceIDUpperBound uint64
- description string
-}
-
-func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult {
- psc := trace.SpanContextFromContext(p.ParentContext)
- x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1
- if x < ts.traceIDUpperBound {
- return SamplingResult{
- Decision: RecordAndSample,
- Tracestate: psc.TraceState(),
- }
- }
- return SamplingResult{
- Decision: Drop,
- Tracestate: psc.TraceState(),
- }
-}
-
-func (ts traceIDRatioSampler) Description() string {
- return ts.description
-}
-
-// TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will
-// always sample. Fractions < 0 are treated as zero. To respect the
-// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used
-// as a delegate of a `Parent` sampler.
-//
-//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased`
-func TraceIDRatioBased(fraction float64) Sampler {
- if fraction >= 1 {
- return AlwaysSample()
- }
-
- if fraction <= 0 {
- fraction = 0
- }
-
- return &traceIDRatioSampler{
- traceIDUpperBound: uint64(fraction * (1 << 63)),
- description: fmt.Sprintf("TraceIDRatioBased{%g}", fraction),
- }
-}
-
-type alwaysOnSampler struct{}
-
-func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
- return SamplingResult{
- Decision: RecordAndSample,
- Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
- }
-}
-
-func (as alwaysOnSampler) Description() string {
- return "AlwaysOnSampler"
-}
-
-// AlwaysSample returns a Sampler that samples every trace.
-// Be careful about using this sampler in a production application with
-// significant traffic: a new trace will be started and exported for every
-// request.
-func AlwaysSample() Sampler {
- return alwaysOnSampler{}
-}
-
-type alwaysOffSampler struct{}
-
-func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
- return SamplingResult{
- Decision: Drop,
- Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
- }
-}
-
-func (as alwaysOffSampler) Description() string {
- return "AlwaysOffSampler"
-}
-
-// NeverSample returns a Sampler that samples no traces.
-func NeverSample() Sampler {
- return alwaysOffSampler{}
-}
-
-// ParentBased returns a sampler decorator which behaves differently,
-// based on the parent of the span. If the span has no parent,
-// the decorated sampler is used to make sampling decision. If the span has
-// a parent, depending on whether the parent is remote and whether it
-// is sampled, one of the following samplers will apply:
-// - remoteParentSampled(Sampler) (default: AlwaysOn)
-// - remoteParentNotSampled(Sampler) (default: AlwaysOff)
-// - localParentSampled(Sampler) (default: AlwaysOn)
-// - localParentNotSampled(Sampler) (default: AlwaysOff)
-func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler {
- return parentBased{
- root: root,
- config: configureSamplersForParentBased(samplers),
- }
-}
-
-type parentBased struct {
- root Sampler
- config samplerConfig
-}
-
-func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig {
- c := samplerConfig{
- remoteParentSampled: AlwaysSample(),
- remoteParentNotSampled: NeverSample(),
- localParentSampled: AlwaysSample(),
- localParentNotSampled: NeverSample(),
- }
-
- for _, so := range samplers {
- c = so.apply(c)
- }
-
- return c
-}
-
-// samplerConfig is a group of options for parentBased sampler.
-type samplerConfig struct {
- remoteParentSampled, remoteParentNotSampled Sampler
- localParentSampled, localParentNotSampled Sampler
-}
-
-// ParentBasedSamplerOption configures the sampler for a particular sampling case.
-type ParentBasedSamplerOption interface {
- apply(samplerConfig) samplerConfig
-}
-
-// WithRemoteParentSampled sets the sampler for the case of sampled remote parent.
-func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption {
- return remoteParentSampledOption{s}
-}
-
-type remoteParentSampledOption struct {
- s Sampler
-}
-
-func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig {
- config.remoteParentSampled = o.s
- return config
-}
-
-// WithRemoteParentNotSampled sets the sampler for the case of remote parent
-// which is not sampled.
-func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption {
- return remoteParentNotSampledOption{s}
-}
-
-type remoteParentNotSampledOption struct {
- s Sampler
-}
-
-func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig {
- config.remoteParentNotSampled = o.s
- return config
-}
-
-// WithLocalParentSampled sets the sampler for the case of sampled local parent.
-func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption {
- return localParentSampledOption{s}
-}
-
-type localParentSampledOption struct {
- s Sampler
-}
-
-func (o localParentSampledOption) apply(config samplerConfig) samplerConfig {
- config.localParentSampled = o.s
- return config
-}
-
-// WithLocalParentNotSampled sets the sampler for the case of local parent
-// which is not sampled.
-func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption {
- return localParentNotSampledOption{s}
-}
-
-type localParentNotSampledOption struct {
- s Sampler
-}
-
-func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig {
- config.localParentNotSampled = o.s
- return config
-}
-
-func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult {
- psc := trace.SpanContextFromContext(p.ParentContext)
- if psc.IsValid() {
- if psc.IsRemote() {
- if psc.IsSampled() {
- return pb.config.remoteParentSampled.ShouldSample(p)
- }
- return pb.config.remoteParentNotSampled.ShouldSample(p)
- }
-
- if psc.IsSampled() {
- return pb.config.localParentSampled.ShouldSample(p)
- }
- return pb.config.localParentNotSampled.ShouldSample(p)
- }
- return pb.root.ShouldSample(p)
-}
-
-func (pb parentBased) Description() string {
- return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+
- "remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}",
- pb.root.Description(),
- pb.config.remoteParentSampled.Description(),
- pb.config.remoteParentNotSampled.Description(),
- pb.config.localParentSampled.Description(),
- pb.config.localParentNotSampled.Description(),
- )
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
deleted file mode 100644
index 554111bb4..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "context"
- "sync"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/internal/global"
-)
-
-// simpleSpanProcessor is a SpanProcessor that synchronously sends all
-// completed Spans to a trace.Exporter immediately.
-type simpleSpanProcessor struct {
- exporterMu sync.Mutex
- exporter SpanExporter
- stopOnce sync.Once
-}
-
-var _ SpanProcessor = (*simpleSpanProcessor)(nil)
-
-// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously
-// send completed spans to the exporter immediately.
-//
-// This SpanProcessor is not recommended for production use. The synchronous
-// nature of this SpanProcessor makes it good for testing, debugging, or showing
-// examples of other features, but it will be slow and have a high computation
-// resource usage overhead. The BatchSpanProcessor is recommended for production
-// use instead.
-func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
- ssp := &simpleSpanProcessor{
- exporter: exporter,
- }
- global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.")
-
- return ssp
-}
-
-// OnStart does nothing.
-func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
-
-// OnEnd immediately exports a ReadOnlySpan.
-func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
- ssp.exporterMu.Lock()
- defer ssp.exporterMu.Unlock()
-
- if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
- if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
- otel.Handle(err)
- }
- }
-}
-
-// Shutdown shuts down the exporter this SimpleSpanProcessor exports to.
-func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
- var err error
- ssp.stopOnce.Do(func() {
- stopFunc := func(exp SpanExporter) (<-chan error, func()) {
- done := make(chan error)
- return done, func() { done <- exp.Shutdown(ctx) }
- }
-
- // The exporter field of the simpleSpanProcessor needs to be zeroed to
- // signal it is shut down, meaning all subsequent calls to OnEnd will
- // be gracefully ignored. This needs to be done synchronously to avoid
- // any race condition.
- //
- // A closure is used to keep reference to the exporter and then the
- // field is zeroed. This ensures the simpleSpanProcessor is shut down
- // before the exporter. This order is important as it avoids a potential
- // deadlock. If the exporter shut down operation generates a span, that
- // span would need to be exported. Meaning, OnEnd would be called and
- // try acquiring the lock that is held here.
- ssp.exporterMu.Lock()
- done, shutdown := stopFunc(ssp.exporter)
- ssp.exporter = nil
- ssp.exporterMu.Unlock()
-
- go shutdown()
-
- // Wait for the exporter to shut down or the deadline to expire.
- select {
- case err = <-done:
- case <-ctx.Done():
- // It is possible for the exporter to have immediately shut down and
- // the context to be done simultaneously. In that case this outer
- // select statement will randomly choose a case. This will result in
- // a different returned error for similar scenarios. Instead, double
- // check if the exporter shut down at the same time and return that
- // error if so. This will ensure consistency as well as ensure
- // the caller knows the exporter shut down successfully (they can
- // already determine if the deadline is expired given they passed
- // the context).
- select {
- case err = <-done:
- default:
- err = ctx.Err()
- }
- }
- })
- return err
-}
-
-// ForceFlush does nothing as there is no data to flush.
-func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
- return nil
-}
-
-// MarshalLog is the marshaling function used by the logging system to represent
-// this Span Processor.
-func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
- return struct {
- Type string
- Exporter SpanExporter
- }{
- Type: "SimpleSpanProcessor",
- Exporter: ssp.exporter,
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
deleted file mode 100644
index d511d0f27..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "time"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/sdk/instrumentation"
- "go.opentelemetry.io/otel/sdk/resource"
- "go.opentelemetry.io/otel/trace"
-)
-
-// snapshot is an record of a spans state at a particular checkpointed time.
-// It is used as a read-only representation of that state.
-type snapshot struct {
- name string
- spanContext trace.SpanContext
- parent trace.SpanContext
- spanKind trace.SpanKind
- startTime time.Time
- endTime time.Time
- attributes []attribute.KeyValue
- events []Event
- links []Link
- status Status
- childSpanCount int
- droppedAttributeCount int
- droppedEventCount int
- droppedLinkCount int
- resource *resource.Resource
- instrumentationScope instrumentation.Scope
-}
-
-var _ ReadOnlySpan = snapshot{}
-
-func (s snapshot) private() {}
-
-// Name returns the name of the span.
-func (s snapshot) Name() string {
- return s.name
-}
-
-// SpanContext returns the unique SpanContext that identifies the span.
-func (s snapshot) SpanContext() trace.SpanContext {
- return s.spanContext
-}
-
-// Parent returns the unique SpanContext that identifies the parent of the
-// span if one exists. If the span has no parent the returned SpanContext
-// will be invalid.
-func (s snapshot) Parent() trace.SpanContext {
- return s.parent
-}
-
-// SpanKind returns the role the span plays in a Trace.
-func (s snapshot) SpanKind() trace.SpanKind {
- return s.spanKind
-}
-
-// StartTime returns the time the span started recording.
-func (s snapshot) StartTime() time.Time {
- return s.startTime
-}
-
-// EndTime returns the time the span stopped recording. It will be zero if
-// the span has not ended.
-func (s snapshot) EndTime() time.Time {
- return s.endTime
-}
-
-// Attributes returns the defining attributes of the span.
-func (s snapshot) Attributes() []attribute.KeyValue {
- return s.attributes
-}
-
-// Links returns all the links the span has to other spans.
-func (s snapshot) Links() []Link {
- return s.links
-}
-
-// Events returns all the events that occurred within in the spans
-// lifetime.
-func (s snapshot) Events() []Event {
- return s.events
-}
-
-// Status returns the spans status.
-func (s snapshot) Status() Status {
- return s.status
-}
-
-// InstrumentationScope returns information about the instrumentation
-// scope that created the span.
-func (s snapshot) InstrumentationScope() instrumentation.Scope {
- return s.instrumentationScope
-}
-
-// InstrumentationLibrary returns information about the instrumentation
-// library that created the span.
-func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility
- return s.instrumentationScope
-}
-
-// Resource returns information about the entity that produced the span.
-func (s snapshot) Resource() *resource.Resource {
- return s.resource
-}
-
-// DroppedAttributes returns the number of attributes dropped by the span
-// due to limits being reached.
-func (s snapshot) DroppedAttributes() int {
- return s.droppedAttributeCount
-}
-
-// DroppedLinks returns the number of links dropped by the span due to limits
-// being reached.
-func (s snapshot) DroppedLinks() int {
- return s.droppedLinkCount
-}
-
-// DroppedEvents returns the number of events dropped by the span due to
-// limits being reached.
-func (s snapshot) DroppedEvents() int {
- return s.droppedEventCount
-}
-
-// ChildSpanCount returns the count of spans that consider the span a
-// direct parent.
-func (s snapshot) ChildSpanCount() int {
- return s.childSpanCount
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
deleted file mode 100644
index 8f4fc3850..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ /dev/null
@@ -1,937 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "context"
- "fmt"
- "reflect"
- "runtime"
- rt "runtime/trace"
- "slices"
- "strings"
- "sync"
- "time"
- "unicode/utf8"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/sdk/instrumentation"
- "go.opentelemetry.io/otel/sdk/resource"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
- "go.opentelemetry.io/otel/trace"
- "go.opentelemetry.io/otel/trace/embedded"
-)
-
-// ReadOnlySpan allows reading information from the data structure underlying a
-// trace.Span. It is used in places where reading information from a span is
-// necessary but changing the span isn't necessary or allowed.
-//
-// Warning: methods may be added to this interface in minor releases.
-type ReadOnlySpan interface {
- // Name returns the name of the span.
- Name() string
- // SpanContext returns the unique SpanContext that identifies the span.
- SpanContext() trace.SpanContext
- // Parent returns the unique SpanContext that identifies the parent of the
- // span if one exists. If the span has no parent the returned SpanContext
- // will be invalid.
- Parent() trace.SpanContext
- // SpanKind returns the role the span plays in a Trace.
- SpanKind() trace.SpanKind
- // StartTime returns the time the span started recording.
- StartTime() time.Time
- // EndTime returns the time the span stopped recording. It will be zero if
- // the span has not ended.
- EndTime() time.Time
- // Attributes returns the defining attributes of the span.
- // The order of the returned attributes is not guaranteed to be stable across invocations.
- Attributes() []attribute.KeyValue
- // Links returns all the links the span has to other spans.
- Links() []Link
- // Events returns all the events that occurred within in the spans
- // lifetime.
- Events() []Event
- // Status returns the spans status.
- Status() Status
- // InstrumentationScope returns information about the instrumentation
- // scope that created the span.
- InstrumentationScope() instrumentation.Scope
- // InstrumentationLibrary returns information about the instrumentation
- // library that created the span.
- // Deprecated: please use InstrumentationScope instead.
- InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility
- // Resource returns information about the entity that produced the span.
- Resource() *resource.Resource
- // DroppedAttributes returns the number of attributes dropped by the span
- // due to limits being reached.
- DroppedAttributes() int
- // DroppedLinks returns the number of links dropped by the span due to
- // limits being reached.
- DroppedLinks() int
- // DroppedEvents returns the number of events dropped by the span due to
- // limits being reached.
- DroppedEvents() int
- // ChildSpanCount returns the count of spans that consider the span a
- // direct parent.
- ChildSpanCount() int
-
- // A private method to prevent users implementing the
- // interface and so future additions to it will not
- // violate compatibility.
- private()
-}
-
-// ReadWriteSpan exposes the same methods as trace.Span and in addition allows
-// reading information from the underlying data structure.
-// This interface exposes the union of the methods of trace.Span (which is a
-// "write-only" span) and ReadOnlySpan. New methods for writing or reading span
-// information should be added under trace.Span or ReadOnlySpan, respectively.
-//
-// Warning: methods may be added to this interface in minor releases.
-type ReadWriteSpan interface {
- trace.Span
- ReadOnlySpan
-}
-
-// recordingSpan is an implementation of the OpenTelemetry Span API
-// representing the individual component of a trace that is sampled.
-type recordingSpan struct {
- embedded.Span
-
- // mu protects the contents of this span.
- mu sync.Mutex
-
- // parent holds the parent span of this span as a trace.SpanContext.
- parent trace.SpanContext
-
- // spanKind represents the kind of this span as a trace.SpanKind.
- spanKind trace.SpanKind
-
- // name is the name of this span.
- name string
-
- // startTime is the time at which this span was started.
- startTime time.Time
-
- // endTime is the time at which this span was ended. It contains the zero
- // value of time.Time until the span is ended.
- endTime time.Time
-
- // status is the status of this span.
- status Status
-
- // childSpanCount holds the number of child spans created for this span.
- childSpanCount int
-
- // spanContext holds the SpanContext of this span.
- spanContext trace.SpanContext
-
- // attributes is a collection of user provided key/values. The collection
- // is constrained by a configurable maximum held by the parent
- // TracerProvider. When additional attributes are added after this maximum
- // is reached these attributes the user is attempting to add are dropped.
- // This dropped number of attributes is tracked and reported in the
- // ReadOnlySpan exported when the span ends.
- attributes []attribute.KeyValue
- droppedAttributes int
- logDropAttrsOnce sync.Once
-
- // events are stored in FIFO queue capped by configured limit.
- events evictedQueue[Event]
-
- // links are stored in FIFO queue capped by configured limit.
- links evictedQueue[Link]
-
- // executionTracerTaskEnd ends the execution tracer span.
- executionTracerTaskEnd func()
-
- // tracer is the SDK tracer that created this span.
- tracer *tracer
-}
-
-var (
- _ ReadWriteSpan = (*recordingSpan)(nil)
- _ runtimeTracer = (*recordingSpan)(nil)
-)
-
-// SpanContext returns the SpanContext of this span.
-func (s *recordingSpan) SpanContext() trace.SpanContext {
- if s == nil {
- return trace.SpanContext{}
- }
- return s.spanContext
-}
-
-// IsRecording returns if this span is being recorded. If this span has ended
-// this will return false.
-func (s *recordingSpan) IsRecording() bool {
- if s == nil {
- return false
- }
- s.mu.Lock()
- defer s.mu.Unlock()
-
- return s.isRecording()
-}
-
-// isRecording returns if this span is being recorded. If this span has ended
-// this will return false.
-//
-// This method assumes s.mu.Lock is held by the caller.
-func (s *recordingSpan) isRecording() bool {
- if s == nil {
- return false
- }
- return s.endTime.IsZero()
-}
-
-// SetStatus sets the status of the Span in the form of a code and a
-// description, overriding previous values set. The description is only
-// included in the set status when the code is for an error. If this span is
-// not being recorded than this method does nothing.
-func (s *recordingSpan) SetStatus(code codes.Code, description string) {
- if s == nil {
- return
- }
-
- s.mu.Lock()
- defer s.mu.Unlock()
- if !s.isRecording() {
- return
- }
- if s.status.Code > code {
- return
- }
-
- status := Status{Code: code}
- if code == codes.Error {
- status.Description = description
- }
-
- s.status = status
-}
-
-// SetAttributes sets attributes of this span.
-//
-// If a key from attributes already exists the value associated with that key
-// will be overwritten with the value contained in attributes.
-//
-// If this span is not being recorded than this method does nothing.
-//
-// If adding attributes to the span would exceed the maximum amount of
-// attributes the span is configured to have, the last added attributes will
-// be dropped.
-func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
- if s == nil || len(attributes) == 0 {
- return
- }
-
- s.mu.Lock()
- defer s.mu.Unlock()
- if !s.isRecording() {
- return
- }
-
- limit := s.tracer.provider.spanLimits.AttributeCountLimit
- if limit == 0 {
- // No attributes allowed.
- s.addDroppedAttr(len(attributes))
- return
- }
-
- // If adding these attributes could exceed the capacity of s perform a
- // de-duplication and truncation while adding to avoid over allocation.
- if limit > 0 && len(s.attributes)+len(attributes) > limit {
- s.addOverCapAttrs(limit, attributes)
- return
- }
-
- // Otherwise, add without deduplication. When attributes are read they
- // will be deduplicated, optimizing the operation.
- s.attributes = slices.Grow(s.attributes, len(attributes))
- for _, a := range attributes {
- if !a.Valid() {
- // Drop all invalid attributes.
- s.addDroppedAttr(1)
- continue
- }
- a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
- s.attributes = append(s.attributes, a)
- }
-}
-
-// Declared as a var so tests can override.
-var logDropAttrs = func() {
- global.Warn("limit reached: dropping trace Span attributes")
-}
-
-// addDroppedAttr adds incr to the count of dropped attributes.
-//
-// The first, and only the first, time this method is called a warning will be
-// logged.
-//
-// This method assumes s.mu.Lock is held by the caller.
-func (s *recordingSpan) addDroppedAttr(incr int) {
- s.droppedAttributes += incr
- s.logDropAttrsOnce.Do(logDropAttrs)
-}
-
-// addOverCapAttrs adds the attributes attrs to the span s while
-// de-duplicating the attributes of s and attrs and dropping attributes that
-// exceed the limit.
-//
-// This method assumes s.mu.Lock is held by the caller.
-//
-// This method should only be called when there is a possibility that adding
-// attrs to s will exceed the limit. Otherwise, attrs should be added to s
-// without checking for duplicates and all retrieval methods of the attributes
-// for s will de-duplicate as needed.
-//
-// This method assumes limit is a value > 0. The argument should be validated
-// by the caller.
-func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
- // In order to not allocate more capacity to s.attributes than needed,
- // prune and truncate this addition of attributes while adding.
-
- // Do not set a capacity when creating this map. Benchmark testing has
- // showed this to only add unused memory allocations in general use.
- exists := make(map[attribute.Key]int, len(s.attributes))
- s.dedupeAttrsFromRecord(exists)
-
- // Now that s.attributes is deduplicated, adding unique attributes up to
- // the capacity of s will not over allocate s.attributes.
-
- // max size = limit
- maxCap := min(len(attrs)+len(s.attributes), limit)
- if cap(s.attributes) < maxCap {
- s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes))
- }
- for _, a := range attrs {
- if !a.Valid() {
- // Drop all invalid attributes.
- s.addDroppedAttr(1)
- continue
- }
-
- if idx, ok := exists[a.Key]; ok {
- // Perform all updates before dropping, even when at capacity.
- a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
- s.attributes[idx] = a
- continue
- }
-
- if len(s.attributes) >= limit {
- // Do not just drop all of the remaining attributes, make sure
- // updates are checked and performed.
- s.addDroppedAttr(1)
- } else {
- a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
- s.attributes = append(s.attributes, a)
- exists[a.Key] = len(s.attributes) - 1
- }
- }
-}
-
-// truncateAttr returns a truncated version of attr. Only string and string
-// slice attribute values are truncated. String values are truncated to at
-// most a length of limit. Each string slice value is truncated in this fashion
-// (the slice length itself is unaffected).
-//
-// No truncation is performed for a negative limit.
-func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue {
- if limit < 0 {
- return attr
- }
- switch attr.Value.Type() {
- case attribute.STRING:
- v := attr.Value.AsString()
- return attr.Key.String(truncate(limit, v))
- case attribute.STRINGSLICE:
- v := attr.Value.AsStringSlice()
- for i := range v {
- v[i] = truncate(limit, v[i])
- }
- return attr.Key.StringSlice(v)
- }
- return attr
-}
-
-// truncate returns a truncated version of s such that it contains less than
-// the limit number of characters. Truncation is applied by returning the limit
-// number of valid characters contained in s.
-//
-// If limit is negative, it returns the original string.
-//
-// UTF-8 is supported. When truncating, all invalid characters are dropped
-// before applying truncation.
-//
-// If s already contains less than the limit number of bytes, it is returned
-// unchanged. No invalid characters are removed.
-func truncate(limit int, s string) string {
- // This prioritize performance in the following order based on the most
- // common expected use-cases.
- //
- // - Short values less than the default limit (128).
- // - Strings with valid encodings that exceed the limit.
- // - No limit.
- // - Strings with invalid encodings that exceed the limit.
- if limit < 0 || len(s) <= limit {
- return s
- }
-
- // Optimistically, assume all valid UTF-8.
- var b strings.Builder
- count := 0
- for i, c := range s {
- if c != utf8.RuneError {
- count++
- if count > limit {
- return s[:i]
- }
- continue
- }
-
- _, size := utf8.DecodeRuneInString(s[i:])
- if size == 1 {
- // Invalid encoding.
- b.Grow(len(s) - 1)
- _, _ = b.WriteString(s[:i])
- s = s[i:]
- break
- }
- }
-
- // Fast-path, no invalid input.
- if b.Cap() == 0 {
- return s
- }
-
- // Truncate while validating UTF-8.
- for i := 0; i < len(s) && count < limit; {
- c := s[i]
- if c < utf8.RuneSelf {
- // Optimization for single byte runes (common case).
- _ = b.WriteByte(c)
- i++
- count++
- continue
- }
-
- _, size := utf8.DecodeRuneInString(s[i:])
- if size == 1 {
- // We checked for all 1-byte runes above, this is a RuneError.
- i++
- continue
- }
-
- _, _ = b.WriteString(s[i : i+size])
- i += size
- count++
- }
-
- return b.String()
-}
-
-// End ends the span. This method does nothing if the span is already ended or
-// is not being recorded.
-//
-// The only SpanEndOption currently supported are [trace.WithTimestamp], and
-// [trace.WithStackTrace].
-//
-// If this method is called while panicking an error event is added to the
-// Span before ending it and the panic is continued.
-func (s *recordingSpan) End(options ...trace.SpanEndOption) {
- // Do not start by checking if the span is being recorded which requires
- // acquiring a lock. Make a minimal check that the span is not nil.
- if s == nil {
- return
- }
-
- // Store the end time as soon as possible to avoid artificially increasing
- // the span's duration in case some operation below takes a while.
- et := monotonicEndTime(s.startTime)
-
- // Lock the span now that we have an end time and see if we need to do any more processing.
- s.mu.Lock()
- if !s.isRecording() {
- s.mu.Unlock()
- return
- }
-
- config := trace.NewSpanEndConfig(options...)
- if recovered := recover(); recovered != nil {
- // Record but don't stop the panic.
- defer panic(recovered)
- opts := []trace.EventOption{
- trace.WithAttributes(
- semconv.ExceptionType(typeStr(recovered)),
- semconv.ExceptionMessage(fmt.Sprint(recovered)),
- ),
- }
-
- if config.StackTrace() {
- opts = append(opts, trace.WithAttributes(
- semconv.ExceptionStacktrace(recordStackTrace()),
- ))
- }
-
- s.addEvent(semconv.ExceptionEventName, opts...)
- }
-
- if s.executionTracerTaskEnd != nil {
- s.mu.Unlock()
- s.executionTracerTaskEnd()
- s.mu.Lock()
- }
-
- // Setting endTime to non-zero marks the span as ended and not recording.
- if config.Timestamp().IsZero() {
- s.endTime = et
- } else {
- s.endTime = config.Timestamp()
- }
- s.mu.Unlock()
-
- sps := s.tracer.provider.getSpanProcessors()
- if len(sps) == 0 {
- return
- }
- snap := s.snapshot()
- for _, sp := range sps {
- sp.sp.OnEnd(snap)
- }
-}
-
-// monotonicEndTime returns the end time at present but offset from start,
-// monotonically.
-//
-// The monotonic clock is used in subtractions hence the duration since start
-// added back to start gives end as a monotonic time. See
-// https://golang.org/pkg/time/#hdr-Monotonic_Clocks
-func monotonicEndTime(start time.Time) time.Time {
- return start.Add(time.Since(start))
-}
-
-// RecordError will record err as a span event for this span. An additional call to
-// SetStatus is required if the Status of the Span should be set to Error, this method
-// does not change the Span status. If this span is not being recorded or err is nil
-// than this method does nothing.
-func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
- if s == nil || err == nil {
- return
- }
-
- s.mu.Lock()
- defer s.mu.Unlock()
- if !s.isRecording() {
- return
- }
-
- opts = append(opts, trace.WithAttributes(
- semconv.ExceptionType(typeStr(err)),
- semconv.ExceptionMessage(err.Error()),
- ))
-
- c := trace.NewEventConfig(opts...)
- if c.StackTrace() {
- opts = append(opts, trace.WithAttributes(
- semconv.ExceptionStacktrace(recordStackTrace()),
- ))
- }
-
- s.addEvent(semconv.ExceptionEventName, opts...)
-}
-
-func typeStr(i interface{}) string {
- t := reflect.TypeOf(i)
- if t.PkgPath() == "" && t.Name() == "" {
- // Likely a builtin type.
- return t.String()
- }
- return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
-}
-
-func recordStackTrace() string {
- stackTrace := make([]byte, 2048)
- n := runtime.Stack(stackTrace, false)
-
- return string(stackTrace[0:n])
-}
-
-// AddEvent adds an event with the provided name and options. If this span is
-// not being recorded then this method does nothing.
-func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
- if s == nil {
- return
- }
-
- s.mu.Lock()
- defer s.mu.Unlock()
- if !s.isRecording() {
- return
- }
- s.addEvent(name, o...)
-}
-
-// addEvent adds an event with the provided name and options.
-//
-// This method assumes s.mu.Lock is held by the caller.
-func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
- c := trace.NewEventConfig(o...)
- e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
-
- // Discard attributes over limit.
- limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit
- if limit == 0 {
- // Drop all attributes.
- e.DroppedAttributeCount = len(e.Attributes)
- e.Attributes = nil
- } else if limit > 0 && len(e.Attributes) > limit {
- // Drop over capacity.
- e.DroppedAttributeCount = len(e.Attributes) - limit
- e.Attributes = e.Attributes[:limit]
- }
-
- s.events.add(e)
-}
-
-// SetName sets the name of this span. If this span is not being recorded than
-// this method does nothing.
-func (s *recordingSpan) SetName(name string) {
- if s == nil {
- return
- }
-
- s.mu.Lock()
- defer s.mu.Unlock()
- if !s.isRecording() {
- return
- }
- s.name = name
-}
-
-// Name returns the name of this span.
-func (s *recordingSpan) Name() string {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.name
-}
-
-// Name returns the SpanContext of this span's parent span.
-func (s *recordingSpan) Parent() trace.SpanContext {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.parent
-}
-
-// SpanKind returns the SpanKind of this span.
-func (s *recordingSpan) SpanKind() trace.SpanKind {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.spanKind
-}
-
-// StartTime returns the time this span started.
-func (s *recordingSpan) StartTime() time.Time {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.startTime
-}
-
-// EndTime returns the time this span ended. For spans that have not yet
-// ended, the returned value will be the zero value of time.Time.
-func (s *recordingSpan) EndTime() time.Time {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.endTime
-}
-
-// Attributes returns the attributes of this span.
-//
-// The order of the returned attributes is not guaranteed to be stable.
-func (s *recordingSpan) Attributes() []attribute.KeyValue {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.dedupeAttrs()
- return s.attributes
-}
-
-// dedupeAttrs deduplicates the attributes of s to fit capacity.
-//
-// This method assumes s.mu.Lock is held by the caller.
-func (s *recordingSpan) dedupeAttrs() {
- // Do not set a capacity when creating this map. Benchmark testing has
- // showed this to only add unused memory allocations in general use.
- exists := make(map[attribute.Key]int, len(s.attributes))
- s.dedupeAttrsFromRecord(exists)
-}
-
-// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
-// using record as the record of unique attribute keys to their index.
-//
-// This method assumes s.mu.Lock is held by the caller.
-func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) {
- // Use the fact that slices share the same backing array.
- unique := s.attributes[:0]
- for _, a := range s.attributes {
- if idx, ok := record[a.Key]; ok {
- unique[idx] = a
- } else {
- unique = append(unique, a)
- record[a.Key] = len(unique) - 1
- }
- }
- clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects.
- s.attributes = unique
-}
-
-// Links returns the links of this span.
-func (s *recordingSpan) Links() []Link {
- s.mu.Lock()
- defer s.mu.Unlock()
- if len(s.links.queue) == 0 {
- return []Link{}
- }
- return s.links.copy()
-}
-
-// Events returns the events of this span.
-func (s *recordingSpan) Events() []Event {
- s.mu.Lock()
- defer s.mu.Unlock()
- if len(s.events.queue) == 0 {
- return []Event{}
- }
- return s.events.copy()
-}
-
-// Status returns the status of this span.
-func (s *recordingSpan) Status() Status {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.status
-}
-
-// InstrumentationScope returns the instrumentation.Scope associated with
-// the Tracer that created this span.
-func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.tracer.instrumentationScope
-}
-
-// InstrumentationLibrary returns the instrumentation.Library associated with
-// the Tracer that created this span.
-func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.tracer.instrumentationScope
-}
-
-// Resource returns the Resource associated with the Tracer that created this
-// span.
-func (s *recordingSpan) Resource() *resource.Resource {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.tracer.provider.resource
-}
-
-func (s *recordingSpan) AddLink(link trace.Link) {
- if s == nil {
- return
- }
- if !link.SpanContext.IsValid() && len(link.Attributes) == 0 &&
- link.SpanContext.TraceState().Len() == 0 {
- return
- }
-
- s.mu.Lock()
- defer s.mu.Unlock()
- if !s.isRecording() {
- return
- }
-
- l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes}
-
- // Discard attributes over limit.
- limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit
- if limit == 0 {
- // Drop all attributes.
- l.DroppedAttributeCount = len(l.Attributes)
- l.Attributes = nil
- } else if limit > 0 && len(l.Attributes) > limit {
- l.DroppedAttributeCount = len(l.Attributes) - limit
- l.Attributes = l.Attributes[:limit]
- }
-
- s.links.add(l)
-}
-
-// DroppedAttributes returns the number of attributes dropped by the span
-// due to limits being reached.
-func (s *recordingSpan) DroppedAttributes() int {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.droppedAttributes
-}
-
-// DroppedLinks returns the number of links dropped by the span due to limits
-// being reached.
-func (s *recordingSpan) DroppedLinks() int {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.links.droppedCount
-}
-
-// DroppedEvents returns the number of events dropped by the span due to
-// limits being reached.
-func (s *recordingSpan) DroppedEvents() int {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.events.droppedCount
-}
-
-// ChildSpanCount returns the count of spans that consider the span a
-// direct parent.
-func (s *recordingSpan) ChildSpanCount() int {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.childSpanCount
-}
-
-// TracerProvider returns a trace.TracerProvider that can be used to generate
-// additional Spans on the same telemetry pipeline as the current Span.
-func (s *recordingSpan) TracerProvider() trace.TracerProvider {
- return s.tracer.provider
-}
-
-// snapshot creates a read-only copy of the current state of the span.
-func (s *recordingSpan) snapshot() ReadOnlySpan {
- var sd snapshot
- s.mu.Lock()
- defer s.mu.Unlock()
-
- sd.endTime = s.endTime
- sd.instrumentationScope = s.tracer.instrumentationScope
- sd.name = s.name
- sd.parent = s.parent
- sd.resource = s.tracer.provider.resource
- sd.spanContext = s.spanContext
- sd.spanKind = s.spanKind
- sd.startTime = s.startTime
- sd.status = s.status
- sd.childSpanCount = s.childSpanCount
-
- if len(s.attributes) > 0 {
- s.dedupeAttrs()
- sd.attributes = s.attributes
- }
- sd.droppedAttributeCount = s.droppedAttributes
- if len(s.events.queue) > 0 {
- sd.events = s.events.copy()
- sd.droppedEventCount = s.events.droppedCount
- }
- if len(s.links.queue) > 0 {
- sd.links = s.links.copy()
- sd.droppedLinkCount = s.links.droppedCount
- }
- return &sd
-}
-
-func (s *recordingSpan) addChild() {
- if s == nil {
- return
- }
-
- s.mu.Lock()
- defer s.mu.Unlock()
- if !s.isRecording() {
- return
- }
- s.childSpanCount++
-}
-
-func (*recordingSpan) private() {}
-
-// runtimeTrace starts a "runtime/trace".Task for the span and returns a
-// context containing the task.
-func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context {
- if !rt.IsEnabled() {
- // Avoid additional overhead if runtime/trace is not enabled.
- return ctx
- }
- nctx, task := rt.NewTask(ctx, s.name)
-
- s.mu.Lock()
- s.executionTracerTaskEnd = task.End
- s.mu.Unlock()
-
- return nctx
-}
-
-// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API
-// that wraps a SpanContext. It performs no operations other than to return
-// the wrapped SpanContext or TracerProvider that created it.
-type nonRecordingSpan struct {
- embedded.Span
-
- // tracer is the SDK tracer that created this span.
- tracer *tracer
- sc trace.SpanContext
-}
-
-var _ trace.Span = nonRecordingSpan{}
-
-// SpanContext returns the wrapped SpanContext.
-func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
-
-// IsRecording always returns false.
-func (nonRecordingSpan) IsRecording() bool { return false }
-
-// SetStatus does nothing.
-func (nonRecordingSpan) SetStatus(codes.Code, string) {}
-
-// SetError does nothing.
-func (nonRecordingSpan) SetError(bool) {}
-
-// SetAttributes does nothing.
-func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
-
-// End does nothing.
-func (nonRecordingSpan) End(...trace.SpanEndOption) {}
-
-// RecordError does nothing.
-func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
-
-// AddEvent does nothing.
-func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
-
-// AddLink does nothing.
-func (nonRecordingSpan) AddLink(trace.Link) {}
-
-// SetName does nothing.
-func (nonRecordingSpan) SetName(string) {}
-
-// TracerProvider returns the trace.TracerProvider that provided the Tracer
-// that created this span.
-func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
-
-func isRecording(s SamplingResult) bool {
- return s.Decision == RecordOnly || s.Decision == RecordAndSample
-}
-
-func isSampled(s SamplingResult) bool {
- return s.Decision == RecordAndSample
-}
-
-// Status is the classified state of a Span.
-type Status struct {
- // Code is an identifier of a Spans state classification.
- Code codes.Code
- // Description is a user hint about why that status was set. It is only
- // applicable when Code is Error.
- Description string
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
deleted file mode 100644
index 6bdda3d94..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import "context"
-
-// SpanExporter handles the delivery of spans to external receivers. This is
-// the final component in the trace export pipeline.
-type SpanExporter interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // ExportSpans exports a batch of spans.
- //
- // This function is called synchronously, so there is no concurrency
- // safety requirement. However, due to the synchronous calling pattern,
- // it is critical that all timeouts and cancellations contained in the
- // passed context must be honored.
- //
- // Any retry logic must be contained in this function. The SDK that
- // calls this function will not implement any retry logic. All errors
- // returned by this function are considered unrecoverable and will be
- // reported to a configured error Handler.
- ExportSpans(ctx context.Context, spans []ReadOnlySpan) error
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Shutdown notifies the exporter of a pending halt to operations. The
- // exporter is expected to perform any cleanup or synchronization it
- // requires while honoring all timeouts and cancellations contained in
- // the passed context.
- Shutdown(ctx context.Context) error
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
deleted file mode 100644
index bec5e2097..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import "go.opentelemetry.io/otel/sdk/internal/env"
-
-const (
- // DefaultAttributeValueLengthLimit is the default maximum allowed
- // attribute value length, unlimited.
- DefaultAttributeValueLengthLimit = -1
-
- // DefaultAttributeCountLimit is the default maximum number of attributes
- // a span can have.
- DefaultAttributeCountLimit = 128
-
- // DefaultEventCountLimit is the default maximum number of events a span
- // can have.
- DefaultEventCountLimit = 128
-
- // DefaultLinkCountLimit is the default maximum number of links a span can
- // have.
- DefaultLinkCountLimit = 128
-
- // DefaultAttributePerEventCountLimit is the default maximum number of
- // attributes a span event can have.
- DefaultAttributePerEventCountLimit = 128
-
- // DefaultAttributePerLinkCountLimit is the default maximum number of
- // attributes a span link can have.
- DefaultAttributePerLinkCountLimit = 128
-)
-
-// SpanLimits represents the limits of a span.
-type SpanLimits struct {
- // AttributeValueLengthLimit is the maximum allowed attribute value length.
- //
- // This limit only applies to string and string slice attribute values.
- // Any string longer than this value will be truncated to this length.
- //
- // Setting this to a negative value means no limit is applied.
- AttributeValueLengthLimit int
-
- // AttributeCountLimit is the maximum allowed span attribute count. Any
- // attribute added to a span once this limit is reached will be dropped.
- //
- // Setting this to zero means no attributes will be recorded.
- //
- // Setting this to a negative value means no limit is applied.
- AttributeCountLimit int
-
- // EventCountLimit is the maximum allowed span event count. Any event
- // added to a span once this limit is reached means it will be added but
- // the oldest event will be dropped.
- //
- // Setting this to zero means no events we be recorded.
- //
- // Setting this to a negative value means no limit is applied.
- EventCountLimit int
-
- // LinkCountLimit is the maximum allowed span link count. Any link added
- // to a span once this limit is reached means it will be added but the
- // oldest link will be dropped.
- //
- // Setting this to zero means no links we be recorded.
- //
- // Setting this to a negative value means no limit is applied.
- LinkCountLimit int
-
- // AttributePerEventCountLimit is the maximum number of attributes allowed
- // per span event. Any attribute added after this limit reached will be
- // dropped.
- //
- // Setting this to zero means no attributes will be recorded for events.
- //
- // Setting this to a negative value means no limit is applied.
- AttributePerEventCountLimit int
-
- // AttributePerLinkCountLimit is the maximum number of attributes allowed
- // per span link. Any attribute added after this limit reached will be
- // dropped.
- //
- // Setting this to zero means no attributes will be recorded for links.
- //
- // Setting this to a negative value means no limit is applied.
- AttributePerLinkCountLimit int
-}
-
-// NewSpanLimits returns a SpanLimits with all limits set to the value their
-// corresponding environment variable holds, or the default if unset.
-//
-// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
-// (default: unlimited)
-//
-// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128)
-//
-// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128)
-//
-// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default:
-// 128)
-//
-// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128)
-//
-// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128)
-func NewSpanLimits() SpanLimits {
- return SpanLimits{
- AttributeValueLengthLimit: env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit),
- AttributeCountLimit: env.SpanAttributeCount(DefaultAttributeCountLimit),
- EventCountLimit: env.SpanEventCount(DefaultEventCountLimit),
- LinkCountLimit: env.SpanLinkCount(DefaultLinkCountLimit),
- AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit),
- AttributePerLinkCountLimit: env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit),
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
deleted file mode 100644
index af7f9177f..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "context"
- "sync"
-)
-
-// SpanProcessor is a processing pipeline for spans in the trace signal.
-// SpanProcessors registered with a TracerProvider and are called at the start
-// and end of a Span's lifecycle, and are called in the order they are
-// registered.
-type SpanProcessor interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // OnStart is called when a span is started. It is called synchronously
- // and should not block.
- OnStart(parent context.Context, s ReadWriteSpan)
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // OnEnd is called when span is finished. It is called synchronously and
- // hence not block.
- OnEnd(s ReadOnlySpan)
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Shutdown is called when the SDK shuts down. Any cleanup or release of
- // resources held by the processor should be done in this call.
- //
- // Calls to OnStart, OnEnd, or ForceFlush after this has been called
- // should be ignored.
- //
- // All timeouts and cancellations contained in ctx must be honored, this
- // should not block indefinitely.
- Shutdown(ctx context.Context) error
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // ForceFlush exports all ended spans to the configured Exporter that have not yet
- // been exported. It should only be called when absolutely necessary, such as when
- // using a FaaS provider that may suspend the process after an invocation, but before
- // the Processor can export the completed spans.
- ForceFlush(ctx context.Context) error
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
-
-type spanProcessorState struct {
- sp SpanProcessor
- state sync.Once
-}
-
-func newSpanProcessorState(sp SpanProcessor) *spanProcessorState {
- return &spanProcessorState{sp: sp}
-}
-
-type spanProcessorStates []*spanProcessorState
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
deleted file mode 100644
index 43419d3b5..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-import (
- "context"
- "time"
-
- "go.opentelemetry.io/otel/sdk/instrumentation"
- "go.opentelemetry.io/otel/trace"
- "go.opentelemetry.io/otel/trace/embedded"
-)
-
-type tracer struct {
- embedded.Tracer
-
- provider *TracerProvider
- instrumentationScope instrumentation.Scope
-}
-
-var _ trace.Tracer = &tracer{}
-
-// Start starts a Span and returns it along with a context containing it.
-//
-// The Span is created with the provided name and as a child of any existing
-// span context found in the passed context. The created Span will be
-// configured appropriately by any SpanOption passed.
-func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) {
- config := trace.NewSpanStartConfig(options...)
-
- if ctx == nil {
- // Prevent trace.ContextWithSpan from panicking.
- ctx = context.Background()
- }
-
- // For local spans created by this SDK, track child span count.
- if p := trace.SpanFromContext(ctx); p != nil {
- if sdkSpan, ok := p.(*recordingSpan); ok {
- sdkSpan.addChild()
- }
- }
-
- s := tr.newSpan(ctx, name, &config)
- if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
- sps := tr.provider.getSpanProcessors()
- for _, sp := range sps {
- sp.sp.OnStart(ctx, rw)
- }
- }
- if rtt, ok := s.(runtimeTracer); ok {
- ctx = rtt.runtimeTrace(ctx)
- }
-
- return trace.ContextWithSpan(ctx, s), s
-}
-
-type runtimeTracer interface {
- // runtimeTrace starts a "runtime/trace".Task for the span and
- // returns a context containing the task.
- runtimeTrace(ctx context.Context) context.Context
-}
-
-// newSpan returns a new configured span.
-func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {
- // If told explicitly to make this a new root use a zero value SpanContext
- // as a parent which contains an invalid trace ID and is not remote.
- var psc trace.SpanContext
- if config.NewRoot() {
- ctx = trace.ContextWithSpanContext(ctx, psc)
- } else {
- psc = trace.SpanContextFromContext(ctx)
- }
-
- // If there is a valid parent trace ID, use it to ensure the continuity of
- // the trace. Always generate a new span ID so other components can rely
- // on a unique span ID, even if the Span is non-recording.
- var tid trace.TraceID
- var sid trace.SpanID
- if !psc.TraceID().IsValid() {
- tid, sid = tr.provider.idGenerator.NewIDs(ctx)
- } else {
- tid = psc.TraceID()
- sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
- }
-
- samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
- ParentContext: ctx,
- TraceID: tid,
- Name: name,
- Kind: config.SpanKind(),
- Attributes: config.Attributes(),
- Links: config.Links(),
- })
-
- scc := trace.SpanContextConfig{
- TraceID: tid,
- SpanID: sid,
- TraceState: samplingResult.Tracestate,
- }
- if isSampled(samplingResult) {
- scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
- } else {
- scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
- }
- sc := trace.NewSpanContext(scc)
-
- if !isRecording(samplingResult) {
- return tr.newNonRecordingSpan(sc)
- }
- return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
-}
-
-// newRecordingSpan returns a new configured recordingSpan.
-func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan {
- startTime := config.Timestamp()
- if startTime.IsZero() {
- startTime = time.Now()
- }
-
- s := &recordingSpan{
- // Do not pre-allocate the attributes slice here! Doing so will
- // allocate memory that is likely never going to be used, or if used,
- // will be over-sized. The default Go compiler has been tested to
- // dynamically allocate needed space very well. Benchmarking has shown
- // it to be more performant than what we can predetermine here,
- // especially for the common use case of few to no added
- // attributes.
-
- parent: psc,
- spanContext: sc,
- spanKind: trace.ValidateSpanKind(config.SpanKind()),
- name: name,
- startTime: startTime,
- events: newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit),
- links: newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit),
- tracer: tr,
- }
-
- for _, l := range config.Links() {
- s.AddLink(l)
- }
-
- s.SetAttributes(sr.Attributes...)
- s.SetAttributes(config.Attributes()...)
-
- return s
-}
-
-// newNonRecordingSpan returns a new configured nonRecordingSpan.
-func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
- return nonRecordingSpan{tracer: tr, sc: sc}
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
deleted file mode 100644
index b84dd2c5e..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-// version is the current release version of the metric SDK in use.
-func version() string {
- return "1.16.0-rc.1"
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
deleted file mode 100644
index 6b4038510..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package sdk // import "go.opentelemetry.io/otel/sdk"
-
-// Version is the current release version of the OpenTelemetry SDK in use.
-func Version() string {
- return "1.34.0"
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go
deleted file mode 100644
index d5197e16c..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/semconv/internal"
-
-import (
- "fmt"
- "net"
- "net/http"
- "strconv"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace"
-)
-
-// SemanticConventions are the semantic convention values defined for a
-// version of the OpenTelemetry specification.
-type SemanticConventions struct {
- EnduserIDKey attribute.Key
- HTTPClientIPKey attribute.Key
- HTTPFlavorKey attribute.Key
- HTTPHostKey attribute.Key
- HTTPMethodKey attribute.Key
- HTTPRequestContentLengthKey attribute.Key
- HTTPRouteKey attribute.Key
- HTTPSchemeHTTP attribute.KeyValue
- HTTPSchemeHTTPS attribute.KeyValue
- HTTPServerNameKey attribute.Key
- HTTPStatusCodeKey attribute.Key
- HTTPTargetKey attribute.Key
- HTTPURLKey attribute.Key
- HTTPUserAgentKey attribute.Key
- NetHostIPKey attribute.Key
- NetHostNameKey attribute.Key
- NetHostPortKey attribute.Key
- NetPeerIPKey attribute.Key
- NetPeerNameKey attribute.Key
- NetPeerPortKey attribute.Key
- NetTransportIP attribute.KeyValue
- NetTransportOther attribute.KeyValue
- NetTransportTCP attribute.KeyValue
- NetTransportUDP attribute.KeyValue
- NetTransportUnix attribute.KeyValue
-}
-
-// NetAttributesFromHTTPRequest generates attributes of the net
-// namespace as specified by the OpenTelemetry specification for a
-// span. The network parameter is a string that net.Dial function
-// from standard library can understand.
-func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
- attrs := []attribute.KeyValue{}
-
- switch network {
- case "tcp", "tcp4", "tcp6":
- attrs = append(attrs, sc.NetTransportTCP)
- case "udp", "udp4", "udp6":
- attrs = append(attrs, sc.NetTransportUDP)
- case "ip", "ip4", "ip6":
- attrs = append(attrs, sc.NetTransportIP)
- case "unix", "unixgram", "unixpacket":
- attrs = append(attrs, sc.NetTransportUnix)
- default:
- attrs = append(attrs, sc.NetTransportOther)
- }
-
- peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr)
- if peerIP != "" {
- attrs = append(attrs, sc.NetPeerIPKey.String(peerIP))
- }
- if peerName != "" {
- attrs = append(attrs, sc.NetPeerNameKey.String(peerName))
- }
- if peerPort != 0 {
- attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort))
- }
-
- hostIP, hostName, hostPort := "", "", 0
- for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} {
- hostIP, hostName, hostPort = hostIPNamePort(someHost)
- if hostIP != "" || hostName != "" || hostPort != 0 {
- break
- }
- }
- if hostIP != "" {
- attrs = append(attrs, sc.NetHostIPKey.String(hostIP))
- }
- if hostName != "" {
- attrs = append(attrs, sc.NetHostNameKey.String(hostName))
- }
- if hostPort != 0 {
- attrs = append(attrs, sc.NetHostPortKey.Int(hostPort))
- }
-
- return attrs
-}
-
-// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort.
-// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized
-// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the
-// host portion will instead be returned in `name`.
-func hostIPNamePort(hostWithPort string) (ip string, name string, port int) {
- var (
- hostPart, portPart string
- parsedPort uint64
- err error
- )
- if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil {
- hostPart, portPart = hostWithPort, ""
- }
- if parsedIP := net.ParseIP(hostPart); parsedIP != nil {
- ip = parsedIP.String()
- } else {
- name = hostPart
- }
- if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil {
- port = int(parsedPort) // nolint: gosec // Bit size of 16 checked above.
- }
- return
-}
-
-// EndUserAttributesFromHTTPRequest generates attributes of the
-// enduser namespace as specified by the OpenTelemetry specification
-// for a span.
-func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
- if username, _, ok := request.BasicAuth(); ok {
- return []attribute.KeyValue{sc.EnduserIDKey.String(username)}
- }
- return nil
-}
-
-// HTTPClientAttributesFromHTTPRequest generates attributes of the
-// http namespace as specified by the OpenTelemetry specification for
-// a span on the client side.
-func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
- attrs := []attribute.KeyValue{}
-
- // remove any username/password info that may be in the URL
- // before adding it to the attributes
- userinfo := request.URL.User
- request.URL.User = nil
-
- attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String()))
-
- // restore any username/password info that was removed
- request.URL.User = userinfo
-
- return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...)
-}
-
-func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
- attrs := []attribute.KeyValue{}
- if ua := request.UserAgent(); ua != "" {
- attrs = append(attrs, sc.HTTPUserAgentKey.String(ua))
- }
- if request.ContentLength > 0 {
- attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength))
- }
-
- return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...)
-}
-
-func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
- // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality
- attrs := []attribute.KeyValue{}
-
- if request.TLS != nil {
- attrs = append(attrs, sc.HTTPSchemeHTTPS)
- } else {
- attrs = append(attrs, sc.HTTPSchemeHTTP)
- }
-
- if request.Host != "" {
- attrs = append(attrs, sc.HTTPHostKey.String(request.Host))
- } else if request.URL != nil && request.URL.Host != "" {
- attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host))
- }
-
- flavor := ""
- if request.ProtoMajor == 1 {
- flavor = fmt.Sprintf("1.%d", request.ProtoMinor)
- } else if request.ProtoMajor == 2 {
- flavor = "2"
- }
- if flavor != "" {
- attrs = append(attrs, sc.HTTPFlavorKey.String(flavor))
- }
-
- if request.Method != "" {
- attrs = append(attrs, sc.HTTPMethodKey.String(request.Method))
- } else {
- attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet))
- }
-
- return attrs
-}
-
-// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes
-// to be used with server-side HTTP metrics.
-func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue {
- attrs := []attribute.KeyValue{}
- if serverName != "" {
- attrs = append(attrs, sc.HTTPServerNameKey.String(serverName))
- }
- return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...)
-}
-
-// HTTPServerAttributesFromHTTPRequest generates attributes of the
-// http namespace as specified by the OpenTelemetry specification for
-// a span on the server side. Currently, only basic authentication is
-// supported.
-func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
- attrs := []attribute.KeyValue{
- sc.HTTPTargetKey.String(request.RequestURI),
- }
-
- if serverName != "" {
- attrs = append(attrs, sc.HTTPServerNameKey.String(serverName))
- }
- if route != "" {
- attrs = append(attrs, sc.HTTPRouteKey.String(route))
- }
- if values := request.Header["X-Forwarded-For"]; len(values) > 0 {
- addr := values[0]
- if i := strings.Index(addr, ","); i > 0 {
- addr = addr[:i]
- }
- attrs = append(attrs, sc.HTTPClientIPKey.String(addr))
- }
-
- return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...)
-}
-
-// HTTPAttributesFromHTTPStatusCode generates attributes of the http
-// namespace as specified by the OpenTelemetry specification for a
-// span.
-func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
- attrs := []attribute.KeyValue{
- sc.HTTPStatusCodeKey.Int(code),
- }
- return attrs
-}
-
-type codeRange struct {
- fromInclusive int
- toInclusive int
-}
-
-func (r codeRange) contains(code int) bool {
- return r.fromInclusive <= code && code <= r.toInclusive
-}
-
-var validRangesPerCategory = map[int][]codeRange{
- 1: {
- {http.StatusContinue, http.StatusEarlyHints},
- },
- 2: {
- {http.StatusOK, http.StatusAlreadyReported},
- {http.StatusIMUsed, http.StatusIMUsed},
- },
- 3: {
- {http.StatusMultipleChoices, http.StatusUseProxy},
- {http.StatusTemporaryRedirect, http.StatusPermanentRedirect},
- },
- 4: {
- {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful…
- {http.StatusMisdirectedRequest, http.StatusUpgradeRequired},
- {http.StatusPreconditionRequired, http.StatusTooManyRequests},
- {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge},
- {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons},
- },
- 5: {
- {http.StatusInternalServerError, http.StatusLoopDetected},
- {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired},
- },
-}
-
-// SpanStatusFromHTTPStatusCode generates a status code and a message
-// as specified by the OpenTelemetry specification for a span.
-func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
- spanCode, valid := validateHTTPStatusCode(code)
- if !valid {
- return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- return spanCode, ""
-}
-
-// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message
-// as specified by the OpenTelemetry specification for a span.
-// Exclude 4xx for SERVER to set the appropriate status.
-func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) {
- spanCode, valid := validateHTTPStatusCode(code)
- if !valid {
- return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- category := code / 100
- if spanKind == trace.SpanKindServer && category == 4 {
- return codes.Unset, ""
- }
- return spanCode, ""
-}
-
-// validateHTTPStatusCode validates the HTTP status code and returns
-// corresponding span status code. If the `code` is not a valid HTTP status
-// code, returns span status Error and false.
-func validateHTTPStatusCode(code int) (codes.Code, bool) {
- category := code / 100
- ranges, ok := validRangesPerCategory[category]
- if !ok {
- return codes.Error, false
- }
- ok = false
- for _, crange := range ranges {
- ok = crange.contains(code)
- if ok {
- break
- }
- }
- if !ok {
- return codes.Error, false
- }
- if category > 0 && category < 4 {
- return codes.Unset, true
- }
- return codes.Error, true
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v4/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v4/http.go
deleted file mode 100644
index aab73ffe1..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/internal/v4/http.go
+++ /dev/null
@@ -1,394 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/semconv/internal/v4"
-
-import (
- "fmt"
- "net/http"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
-)
-
-// HTTPConv are the HTTP semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type HTTPConv struct {
- NetConv *NetConv
-
- EnduserIDKey attribute.Key
- HTTPClientIPKey attribute.Key
- NetProtocolNameKey attribute.Key
- NetProtocolVersionKey attribute.Key
- HTTPMethodKey attribute.Key
- HTTPRequestContentLengthKey attribute.Key
- HTTPResponseContentLengthKey attribute.Key
- HTTPRouteKey attribute.Key
- HTTPSchemeHTTP attribute.KeyValue
- HTTPSchemeHTTPS attribute.KeyValue
- HTTPStatusCodeKey attribute.Key
- HTTPTargetKey attribute.Key
- HTTPURLKey attribute.Key
- UserAgentOriginalKey attribute.Key
-}
-
-// ClientResponse returns attributes for an HTTP response received by a client
-// from a server. The following attributes are returned if the related values
-// are defined in resp: "http.status.code", "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event,
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. If a complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-// append(ClientResponse(resp), ClientRequest(resp.Request)...)
-func (c *HTTPConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
- var n int
- if resp.StatusCode > 0 {
- n++
- }
- if resp.ContentLength > 0 {
- n++
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- if resp.StatusCode > 0 {
- attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
- }
- if resp.ContentLength > 0 {
- attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
- }
- return attrs
-}
-
-// ClientRequest returns attributes for an HTTP request made by a client. The
-// following attributes are always returned: "http.url", "http.flavor",
-// "http.method", "net.peer.name". The following attributes are returned if the
-// related values are defined in req: "net.peer.port", "http.user_agent",
-// "http.request_content_length", "enduser.id".
-func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue {
- n := 3 // URL, peer name, proto, and method.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- peer, p := firstHostPort(h, req.Header.Get("Host"))
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
- if port > 0 {
- n++
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
- if req.ContentLength > 0 {
- n++
- }
- userID, _, hasUserID := req.BasicAuth()
- if hasUserID {
- n++
- }
- attrs := make([]attribute.KeyValue, 0, n)
-
- attrs = append(attrs, c.method(req.Method))
- attrs = append(attrs, c.proto(req.Proto))
-
- var u string
- if req.URL != nil {
- // Remove any username/password info that may be in the URL.
- userinfo := req.URL.User
- req.URL.User = nil
- u = req.URL.String()
- // Restore any username/password info that was removed.
- req.URL.User = userinfo
- }
- attrs = append(attrs, c.HTTPURLKey.String(u))
-
- attrs = append(attrs, c.NetConv.PeerName(peer))
- if port > 0 {
- attrs = append(attrs, c.NetConv.PeerPort(port))
- }
-
- if useragent != "" {
- attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
- }
-
- if l := req.ContentLength; l > 0 {
- attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
- }
-
- if hasUserID {
- attrs = append(attrs, c.EnduserIDKey.String(userID))
- }
-
- return attrs
-}
-
-// ServerRequest returns attributes for an HTTP request received by a server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.flavor", "http.target", "net.host.name". The following attributes are
-// returned if they related values are defined in req: "net.host.port",
-// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
-// "http.client_ip".
-func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
- // TODO: This currently does not add the specification required
- // `http.target` attribute. It has too high of a cardinality to safely be
- // added. An alternate should be added, or this comment removed, when it is
- // addressed by the specification. If it is ultimately decided to continue
- // not including the attribute, the HTTPTargetKey field of the HTTPConv
- // should be removed as well.
-
- n := 4 // Method, scheme, proto, and host name.
- var host string
- var p int
- if server == "" {
- host, p = splitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = splitHostPort(server)
- if p < 0 {
- _, p = splitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- peer, peerPort := splitHostPort(req.RemoteAddr)
- if peer != "" {
- n++
- if peerPort > 0 {
- n++
- }
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
- userID, _, hasUserID := req.BasicAuth()
- if hasUserID {
- n++
- }
- clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
- if clientIP != "" {
- n++
- }
- attrs := make([]attribute.KeyValue, 0, n)
-
- attrs = append(attrs, c.method(req.Method))
- attrs = append(attrs, c.scheme(req.TLS != nil))
- attrs = append(attrs, c.proto(req.Proto))
- attrs = append(attrs, c.NetConv.HostName(host))
-
- if hostPort > 0 {
- attrs = append(attrs, c.NetConv.HostPort(hostPort))
- }
-
- if peer != "" {
- // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
- // file-path that would be interpreted with a sock family.
- attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
- if peerPort > 0 {
- attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
- }
- }
-
- if useragent != "" {
- attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
- }
-
- if hasUserID {
- attrs = append(attrs, c.EnduserIDKey.String(userID))
- }
-
- if clientIP != "" {
- attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
- }
-
- return attrs
-}
-
-func (c *HTTPConv) method(method string) attribute.KeyValue {
- if method == "" {
- return c.HTTPMethodKey.String(http.MethodGet)
- }
- return c.HTTPMethodKey.String(method)
-}
-
-func (c *HTTPConv) scheme(https bool) attribute.KeyValue { // nolint:revive
- if https {
- return c.HTTPSchemeHTTPS
- }
- return c.HTTPSchemeHTTP
-}
-
-func (c *HTTPConv) proto(proto string) attribute.KeyValue {
- switch proto {
- case "HTTP/1.0":
- return c.NetProtocolVersionKey.String("1.0")
- case "HTTP/1.1":
- return c.NetProtocolVersionKey.String("1.1")
- case "HTTP/2":
- return c.NetProtocolVersionKey.String("2.0")
- case "HTTP/3":
- return c.NetProtocolVersionKey.String("3.0")
- default:
- return c.NetProtocolNameKey.String(proto)
- }
-}
-
-func serverClientIP(xForwardedFor string) string {
- if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
- xForwardedFor = xForwardedFor[:idx]
- }
- return xForwardedFor
-}
-
-func requiredHTTPPort(https bool, port int) int { // nolint:revive
- if https {
- if port > 0 && port != 443 {
- return port
- }
- } else {
- if port > 0 && port != 80 {
- return port
- }
- }
- return -1
-}
-
-// Return the request host and port from the first non-empty source.
-func firstHostPort(source ...string) (host string, port int) {
- for _, hostport := range source {
- host, port = splitHostPort(hostport)
- if host != "" || port > 0 {
- break
- }
- }
- return
-}
-
-// RequestHeader returns the contents of h as OpenTelemetry attributes.
-func (c *HTTPConv) RequestHeader(h http.Header) []attribute.KeyValue {
- return c.header("http.request.header", h)
-}
-
-// ResponseHeader returns the contents of h as OpenTelemetry attributes.
-func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue {
- return c.header("http.response.header", h)
-}
-
-func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue {
- key := func(k string) attribute.Key {
- k = strings.ToLower(k)
- k = strings.ReplaceAll(k, "-", "_")
- k = fmt.Sprintf("%s.%s", prefix, k)
- return attribute.Key(k)
- }
-
- attrs := make([]attribute.KeyValue, 0, len(h))
- for k, v := range h {
- attrs = append(attrs, key(k).StringSlice(v))
- }
- return attrs
-}
-
-// ClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) {
- stat, valid := validateHTTPStatusCode(code)
- if !valid {
- return stat, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- return stat, ""
-}
-
-// ServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) {
- stat, valid := validateHTTPStatusCode(code)
- if !valid {
- return stat, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
-
- if code/100 == 4 {
- return codes.Unset, ""
- }
- return stat, ""
-}
-
-type codeRange struct {
- fromInclusive int
- toInclusive int
-}
-
-func (r codeRange) contains(code int) bool {
- return r.fromInclusive <= code && code <= r.toInclusive
-}
-
-var validRangesPerCategory = map[int][]codeRange{
- 1: {
- {http.StatusContinue, http.StatusEarlyHints},
- },
- 2: {
- {http.StatusOK, http.StatusAlreadyReported},
- {http.StatusIMUsed, http.StatusIMUsed},
- },
- 3: {
- {http.StatusMultipleChoices, http.StatusUseProxy},
- {http.StatusTemporaryRedirect, http.StatusPermanentRedirect},
- },
- 4: {
- {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful…
- {http.StatusMisdirectedRequest, http.StatusUpgradeRequired},
- {http.StatusPreconditionRequired, http.StatusTooManyRequests},
- {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge},
- {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons},
- },
- 5: {
- {http.StatusInternalServerError, http.StatusLoopDetected},
- {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired},
- },
-}
-
-// validateHTTPStatusCode validates the HTTP status code and returns
-// corresponding span status code. If the `code` is not a valid HTTP status
-// code, returns span status Error and false.
-func validateHTTPStatusCode(code int) (codes.Code, bool) {
- category := code / 100
- ranges, ok := validRangesPerCategory[category]
- if !ok {
- return codes.Error, false
- }
- ok = false
- for _, crange := range ranges {
- ok = crange.contains(code)
- if ok {
- break
- }
- }
- if !ok {
- return codes.Error, false
- }
- if category > 0 && category < 4 {
- return codes.Unset, true
- }
- return codes.Error, true
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v4/net.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v4/net.go
deleted file mode 100644
index f240b9af0..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/internal/v4/net.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/semconv/internal/v4"
-
-import (
- "net"
- "strconv"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// NetConv are the network semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type NetConv struct {
- NetHostNameKey attribute.Key
- NetHostPortKey attribute.Key
- NetPeerNameKey attribute.Key
- NetPeerPortKey attribute.Key
- NetSockFamilyKey attribute.Key
- NetSockPeerAddrKey attribute.Key
- NetSockPeerPortKey attribute.Key
- NetSockHostAddrKey attribute.Key
- NetSockHostPortKey attribute.Key
- NetTransportOther attribute.KeyValue
- NetTransportTCP attribute.KeyValue
- NetTransportUDP attribute.KeyValue
- NetTransportInProc attribute.KeyValue
-}
-
-func (c *NetConv) Transport(network string) attribute.KeyValue {
- switch network {
- case "tcp", "tcp4", "tcp6":
- return c.NetTransportTCP
- case "udp", "udp4", "udp6":
- return c.NetTransportUDP
- case "unix", "unixgram", "unixpacket":
- return c.NetTransportInProc
- default:
- // "ip:*", "ip4:*", and "ip6:*" all are considered other.
- return c.NetTransportOther
- }
-}
-
-// Host returns attributes for a network host address.
-func (c *NetConv) Host(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.HostName(h))
- if p > 0 {
- attrs = append(attrs, c.HostPort(p))
- }
- return attrs
-}
-
-// Server returns attributes for a network listener listening at address. See
-// net.Listen for information about acceptable address values, address should
-// be the same as the one used to create ln. If ln is nil, only network host
-// attributes will be returned that describe address. Otherwise, the socket
-// level information about ln will also be included.
-func (c *NetConv) Server(address string, ln net.Listener) []attribute.KeyValue {
- if ln == nil {
- return c.Host(address)
- }
-
- lAddr := ln.Addr()
- if lAddr == nil {
- return c.Host(address)
- }
-
- hostName, hostPort := splitHostPort(address)
- sockHostAddr, sockHostPort := splitHostPort(lAddr.String())
- network := lAddr.Network()
- sockFamily := family(network, sockHostAddr)
-
- n := nonZeroStr(hostName, network, sockHostAddr, sockFamily)
- n += positiveInt(hostPort, sockHostPort)
- attr := make([]attribute.KeyValue, 0, n)
- if hostName != "" {
- attr = append(attr, c.HostName(hostName))
- if hostPort > 0 {
- // Only if net.host.name is set should net.host.port be.
- attr = append(attr, c.HostPort(hostPort))
- }
- }
- if network != "" {
- attr = append(attr, c.Transport(network))
- }
- if sockFamily != "" {
- attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
- }
- if sockHostAddr != "" {
- attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
- if sockHostPort > 0 {
- // Only if net.sock.host.addr is set should net.sock.host.port be.
- attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
- }
- }
- return attr
-}
-
-func (c *NetConv) HostName(name string) attribute.KeyValue {
- return c.NetHostNameKey.String(name)
-}
-
-func (c *NetConv) HostPort(port int) attribute.KeyValue {
- return c.NetHostPortKey.Int(port)
-}
-
-// Client returns attributes for a client network connection to address. See
-// net.Dial for information about acceptable address values, address should be
-// the same as the one used to create conn. If conn is nil, only network peer
-// attributes will be returned that describe address. Otherwise, the socket
-// level information about conn will also be included.
-func (c *NetConv) Client(address string, conn net.Conn) []attribute.KeyValue {
- if conn == nil {
- return c.Peer(address)
- }
-
- lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr()
-
- var network string
- switch {
- case lAddr != nil:
- network = lAddr.Network()
- case rAddr != nil:
- network = rAddr.Network()
- default:
- return c.Peer(address)
- }
-
- peerName, peerPort := splitHostPort(address)
- var (
- sockFamily string
- sockPeerAddr string
- sockPeerPort int
- sockHostAddr string
- sockHostPort int
- )
-
- if lAddr != nil {
- sockHostAddr, sockHostPort = splitHostPort(lAddr.String())
- }
-
- if rAddr != nil {
- sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String())
- }
-
- switch {
- case sockHostAddr != "":
- sockFamily = family(network, sockHostAddr)
- case sockPeerAddr != "":
- sockFamily = family(network, sockPeerAddr)
- }
-
- n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily)
- n += positiveInt(peerPort, sockPeerPort, sockHostPort)
- attr := make([]attribute.KeyValue, 0, n)
- if peerName != "" {
- attr = append(attr, c.PeerName(peerName))
- if peerPort > 0 {
- // Only if net.peer.name is set should net.peer.port be.
- attr = append(attr, c.PeerPort(peerPort))
- }
- }
- if network != "" {
- attr = append(attr, c.Transport(network))
- }
- if sockFamily != "" {
- attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
- }
- if sockPeerAddr != "" {
- attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr))
- if sockPeerPort > 0 {
- // Only if net.sock.peer.addr is set should net.sock.peer.port be.
- attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort))
- }
- }
- if sockHostAddr != "" {
- attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
- if sockHostPort > 0 {
- // Only if net.sock.host.addr is set should net.sock.host.port be.
- attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
- }
- }
- return attr
-}
-
-func family(network, address string) string {
- switch network {
- case "unix", "unixgram", "unixpacket":
- return "unix"
- default:
- if ip := net.ParseIP(address); ip != nil {
- if ip.To4() == nil {
- return "inet6"
- }
- return "inet"
- }
- }
- return ""
-}
-
-func nonZeroStr(strs ...string) int {
- var n int
- for _, str := range strs {
- if str != "" {
- n++
- }
- }
- return n
-}
-
-func positiveInt(ints ...int) int {
- var n int
- for _, i := range ints {
- if i > 0 {
- n++
- }
- }
- return n
-}
-
-// Peer returns attributes for a network peer address.
-func (c *NetConv) Peer(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.PeerName(h))
- if p > 0 {
- attrs = append(attrs, c.PeerPort(p))
- }
- return attrs
-}
-
-func (c *NetConv) PeerName(name string) attribute.KeyValue {
- return c.NetPeerNameKey.String(name)
-}
-
-func (c *NetConv) PeerPort(port int) attribute.KeyValue {
- return c.NetPeerPortKey.Int(port)
-}
-
-func (c *NetConv) SockPeerAddr(addr string) attribute.KeyValue {
- return c.NetSockPeerAddrKey.String(addr)
-}
-
-func (c *NetConv) SockPeerPort(port int) attribute.KeyValue {
- return c.NetSockPeerPortKey.Int(port)
-}
-
-// splitHostPort splits a network address hostport of the form "host",
-// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
-// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
-// port.
-//
-// An empty host is returned if it is not provided or unparsable. A negative
-// port is returned if it is not provided or unparsable.
-func splitHostPort(hostport string) (host string, port int) {
- port = -1
-
- if strings.HasPrefix(hostport, "[") {
- addrEnd := strings.LastIndex(hostport, "]")
- if addrEnd < 0 {
- // Invalid hostport.
- return
- }
- if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
- host = hostport[1:addrEnd]
- return
- }
- } else {
- if i := strings.LastIndex(hostport, ":"); i < 0 {
- host = hostport
- return
- }
- }
-
- host, pStr, err := net.SplitHostPort(hostport)
- if err != nil {
- return
- }
-
- p, err := strconv.ParseUint(pStr, 10, 16)
- if err != nil {
- return
- }
- return host, int(p) // nolint: gosec // Bit size of 16 checked above.
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/README.md
deleted file mode 100644
index c692442c3..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.10.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.10.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.10.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/doc.go
deleted file mode 100644
index 60e7be59f..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconv implements OpenTelemetry semantic conventions.
-//
-// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the v1.10.0 version of the OpenTelemetry specification.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/exception.go
deleted file mode 100644
index 3c042d4f9..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/exception.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
-
-const (
- // ExceptionEventName is the name of the Span event representing an exception.
- ExceptionEventName = "exception"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go
deleted file mode 100644
index f08308586..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
-
-import (
- "net/http"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/semconv/internal"
- "go.opentelemetry.io/otel/trace"
-)
-
-// HTTP scheme attributes.
-var (
- HTTPSchemeHTTP = HTTPSchemeKey.String("http")
- HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
-)
-
-var sc = &internal.SemanticConventions{
- EnduserIDKey: EnduserIDKey,
- HTTPClientIPKey: HTTPClientIPKey,
- HTTPFlavorKey: HTTPFlavorKey,
- HTTPHostKey: HTTPHostKey,
- HTTPMethodKey: HTTPMethodKey,
- HTTPRequestContentLengthKey: HTTPRequestContentLengthKey,
- HTTPRouteKey: HTTPRouteKey,
- HTTPSchemeHTTP: HTTPSchemeHTTP,
- HTTPSchemeHTTPS: HTTPSchemeHTTPS,
- HTTPServerNameKey: HTTPServerNameKey,
- HTTPStatusCodeKey: HTTPStatusCodeKey,
- HTTPTargetKey: HTTPTargetKey,
- HTTPURLKey: HTTPURLKey,
- HTTPUserAgentKey: HTTPUserAgentKey,
- NetHostIPKey: NetHostIPKey,
- NetHostNameKey: NetHostNameKey,
- NetHostPortKey: NetHostPortKey,
- NetPeerIPKey: NetPeerIPKey,
- NetPeerNameKey: NetPeerNameKey,
- NetPeerPortKey: NetPeerPortKey,
- NetTransportIP: NetTransportIP,
- NetTransportOther: NetTransportOther,
- NetTransportTCP: NetTransportTCP,
- NetTransportUDP: NetTransportUDP,
- NetTransportUnix: NetTransportUnix,
-}
-
-// NetAttributesFromHTTPRequest generates attributes of the net
-// namespace as specified by the OpenTelemetry specification for a
-// span. The network parameter is a string that net.Dial function
-// from standard library can understand.
-func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
- return sc.NetAttributesFromHTTPRequest(network, request)
-}
-
-// EndUserAttributesFromHTTPRequest generates attributes of the
-// enduser namespace as specified by the OpenTelemetry specification
-// for a span.
-func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
- return sc.EndUserAttributesFromHTTPRequest(request)
-}
-
-// HTTPClientAttributesFromHTTPRequest generates attributes of the
-// http namespace as specified by the OpenTelemetry specification for
-// a span on the client side.
-func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
- return sc.HTTPClientAttributesFromHTTPRequest(request)
-}
-
-// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes
-// to be used with server-side HTTP metrics.
-func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue {
- return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request)
-}
-
-// HTTPServerAttributesFromHTTPRequest generates attributes of the
-// http namespace as specified by the OpenTelemetry specification for
-// a span on the server side. Currently, only basic authentication is
-// supported.
-func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
- return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request)
-}
-
-// HTTPAttributesFromHTTPStatusCode generates attributes of the http
-// namespace as specified by the OpenTelemetry specification for a
-// span.
-func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
- return sc.HTTPAttributesFromHTTPStatusCode(code)
-}
-
-// SpanStatusFromHTTPStatusCode generates a status code and a message
-// as specified by the OpenTelemetry specification for a span.
-func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
- return internal.SpanStatusFromHTTPStatusCode(code)
-}
-
-// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message
-// as specified by the OpenTelemetry specification for a span.
-// Exclude 4xx for SERVER to set the appropriate status.
-func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) {
- return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go
deleted file mode 100644
index 27c52f4b1..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go
+++ /dev/null
@@ -1,970 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// A cloud environment (e.g. GCP, Azure, AWS)
-const (
- // Name of the cloud provider.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- CloudProviderKey = attribute.Key("cloud.provider")
- // The cloud account ID the resource is assigned to.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
- // The geographical region the resource is running.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for example
- // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-
- // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-
- // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-
- // us/global-infrastructure/geographies/), [Google Cloud
- // regions](https://cloud.google.com/about/locations), or [Tencent Cloud
- // regions](https://intl.cloud.tencent.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
- // Cloud regions often have multiple, isolated locations known as zones to
- // increase availability. Availability zone represents the zone where the resource
- // is running.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
- // The cloud platform in use.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-// Resources used by AWS Elastic Container Service (ECS).
-const (
- // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.
- // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-
- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
- // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo
- // perguide/clusters.html).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
- // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l
- // aunch_types.html) for an ECS task.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
- // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates
- // t/developerguide/task_definitions.html).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-
- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
- // The task definition family this task definition is a member of.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
- // The revision for this task definition.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// Resources used by AWS Elastic Kubernetes Service (EKS).
-const (
- // The ARN of an EKS cluster.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// Resources specific to Amazon Web Services.
-const (
- // The name(s) of the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like multi-container
- // applications, where a single application has sidecar containers, and each write
- // to their own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
- // The Amazon Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
- // access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
- // The name(s) of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
- // The ARN(s) of the AWS log stream(s).
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-
- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain
- // several log streams, so these ARNs necessarily identify both a log group and a
- // log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-)
-
-// A container instance.
-const (
- // Container name used by container runtime.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
- // Container ID. Usually a UUID, as for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-
- // identification). The UUID might be abbreviated.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
- // The container runtime managing this container.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
- // Name of the image the container was built on.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
- // Container image tag.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '0.1'
- ContainerImageTagKey = attribute.Key("container.image.tag")
-)
-
-// The software deployment.
-const (
- // Name of the [deployment
- // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'staging', 'production'
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// The device on which the process represented by this resource is running.
-const (
- // A unique identifier representing the device
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values outlined
- // below. This value is not an advertising identifier and MUST NOT be used as
- // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id
- // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden
- // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the
- // Firebase Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on best
- // practices and exact implementation details. Caution should be taken when
- // storing personal data or anything which can identify a user. GDPR and data
- // protection laws may apply, ensure you do your own due diligence.
- DeviceIDKey = attribute.Key("device.id")
- // The model identifier for the device
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine readable version of the
- // model identifier rather than the market or consumer-friendly name of the
- // device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
- // The marketing name for the device model
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human readable version of the
- // device model rather than a machine readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
- // The name of the device manufacturer
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-)
-
-// A serverless instance.
-const (
- // The name of the single function that this runtime instance executes.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'my-function'
- // Note: This is the name of the function as configured/deployed on the FaaS
- // platform and is usually different from the name of the callback function (which
- // may be stored in the
- // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-
- // general.md#source-code-attributes) span attributes).
- FaaSNameKey = attribute.Key("faas.name")
- // The unique ID of the single function that this runtime instance executes.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
- // Note: Depending on the cloud provider, use:
-
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-
- // namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
- // aliases.html) with the resolved function version, as the same runtime instance
- // may be invokable with multiple
- // different aliases.
- // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-
- // resource-names)
- // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-
- // us/rest/api/resources/resources/get-by-id).
-
- // On some providers, it may not be possible to determine the full ID at startup,
- // which is why this field cannot be made required. For example, on AWS the
- // account ID
- // part of the ARN is not available without calling another AWS API
- // which may be deemed too slow for a short-running lambda function.
- // As an alternative, consider setting `faas.id` as a span attribute instead.
- FaaSIDKey = attribute.Key("faas.id")
- // The immutable version of the function being executed.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
-
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
- // versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run:** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-
- // var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
- // The execution environment ID as a string, that will be potentially reused for
- // other invocations to the same function/function version.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
- // The amount of memory available to the serverless function in MiB.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 128
- // Note: It's recommended to set this attribute since e.g. too little memory can
- // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
- // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
- // information.
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-)
-
-// A host is defined as a general computing instance.
-const (
- // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud
- // provider.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-test'
- HostIDKey = attribute.Key("host.id")
- // Name of the host. On Unix systems, it may contain what the hostname command
- // returns, or the fully qualified hostname, or another name specified by the
- // user.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
- // Type of host. For Cloud, this must be the machine type.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
- // The CPU architecture the host system is running on.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- HostArchKey = attribute.Key("host.arch")
- // Name of the VM image or OS install the host was instantiated from.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
- // VM image ID. For Cloud, this value is from the provider.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
- // The version string of the VM image as defined in [Version
- // Attributes](README.md#version-attributes).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
-
-// A Kubernetes Cluster.
-const (
- // The name of the cluster.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-)
-
-// A Kubernetes Node object.
-const (
- // The name of the Node.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
- // The UID of the Node.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-)
-
-// A Kubernetes Namespace.
-const (
- // The name of the namespace that the pod is running in.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-)
-
-// A Kubernetes Pod object.
-const (
- // The UID of the Pod.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
- // The name of the Pod.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-)
-
-// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
-const (
- // The name of the Container from Pod specification, must be unique within a Pod.
- // Container runtime usually uses different globally unique name
- // (`container.name`).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
- // Number of times the container was restarted. This attribute can be used to
- // identify a particular container (running or stopped) within a container spec.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 0, 2
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-)
-
-// A Kubernetes ReplicaSet object.
-const (
- // The UID of the ReplicaSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
- // The name of the ReplicaSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-)
-
-// A Kubernetes Deployment object.
-const (
- // The UID of the Deployment.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
- // The name of the Deployment.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-)
-
-// A Kubernetes StatefulSet object.
-const (
- // The UID of the StatefulSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
- // The name of the StatefulSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-)
-
-// A Kubernetes DaemonSet object.
-const (
- // The UID of the DaemonSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
- // The name of the DaemonSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-)
-
-// A Kubernetes Job object.
-const (
- // The UID of the Job.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
- // The name of the Job.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-)
-
-// A Kubernetes CronJob object.
-const (
- // The UID of the CronJob.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
- // The name of the CronJob.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-)
-
-// The operating system (OS) on which the process represented by this resource is running.
-const (
- // The operating system type.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- OSTypeKey = attribute.Key("os.type")
- // Human readable (not intended to be parsed) OS version information, like e.g.
- // reported by `ver` or `lsb_release -a` commands.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS'
- OSDescriptionKey = attribute.Key("os.description")
- // Human readable operating system name.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
- // The version string of the operating system as defined in [Version
- // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// An operating system process.
-const (
- // Process identifier (PID).
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
- // The name of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
- // The full path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
- // The command used to launch the process (i.e. the command name). On Linux based
- // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows,
- // can be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
- // The full command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not
- // set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
- // All the command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited strings
- // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be
- // the full argv vector passed to `main`.
- //
- // Type: string[]
- // Required: See below
- // Stability: stable
- // Examples: 'cmd/otecol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
- // The username of the user that owns the process.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-)
-
-// The single (language) runtime instance which is monitored.
-const (
- // The name of the runtime of this process. For compiled native binaries, this
- // SHOULD be the name of the compiler.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
- // The version of the runtime of this process, as returned by the runtime without
- // modification.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
- // An additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-)
-
-// A service instance.
-const (
- // Logical name of the service.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled services. If
- // the value was not specified, SDKs MUST fallback to `unknown_service:`
- // concatenated with [`process.executable.name`](process.md#process), e.g.
- // `unknown_service:bash`. If `process.executable.name` is not available, the
- // value MUST be set to `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
- // A namespace for `service.name`.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group of
- // services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name` is
- // expected to be unique for all services that have no explicit namespace defined
- // (so the empty/unspecified namespace is simply one more valid namespace). Zero-
- // length namespace string is assumed equal to unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
- // The string ID of the service instance.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words
- // `service.namespace,service.name,service.instance.id` triplet MUST be globally
- // unique). The ID helps to distinguish instances of the same service that exist
- // at the same time (e.g. instances of a horizontally scaled service). It is
- // preferable for the ID to be persistent and stay the same for the lifetime of
- // the service instance, however it is acceptable that the ID is ephemeral and
- // changes during important lifetime events for the service (e.g. service
- // restarts). If the service has no inherent unique ID that can be used as the
- // value of this attribute it is recommended to generate a random Version 1 or
- // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use
- // Version 5, see RFC 4122 for more recommendations).
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
- // The version string of the service API or implementation.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '2.0.0'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// The telemetry SDK used to capture data recorded by the instrumentation libraries.
-const (
- // The name of the telemetry SDK as defined above.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
- // The language of the telemetry SDK.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
- // The version string of the telemetry SDK.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
- // The version string of the auto instrumentation agent, if used.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '1.2.3'
- TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
-)
-
-// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime.
-const (
- // The name of the web engine.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
- // The version of the web engine.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
- // Additional description of the web engine (e.g. detailed version and edition
- // information).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/schema.go
deleted file mode 100644
index 9eebb78ce..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
-const SchemaURL = "https://opentelemetry.io/schemas/1.10.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go
deleted file mode 100644
index 001d5cbf3..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go
+++ /dev/null
@@ -1,1689 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Span attributes used by AWS Lambda (in addition to general `faas` attributes).
-const (
- // The full invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next`
- // applicable).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `faas.id` if an alias is involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used.
-const (
- // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec
- // .md#id) uniquely identifies the event.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
- // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m
- // d#source-1) identifies the context in which an event happened.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my-
- // service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
- // The [version of the CloudEvents specification](https://github.com/cloudevents/s
- // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
- // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp
- // ec.md#type) contains a value describing the type of event related to the
- // originating occurrence.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
- // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.
- // md#subject) of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-)
-
-// This document defines semantic conventions for the OpenTracing Shim
-const (
- // Parent-child Reference type
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span does not depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// This document defines the attributes used to perform database client calls.
-const (
- // An identifier for the database management system (DBMS) product being used. See
- // below for a list of well-known identifiers.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- DBSystemKey = attribute.Key("db.system")
- // The connection string used to connect to the database. It is recommended to
- // remove embedded credentials.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
- DBConnectionStringKey = attribute.Key("db.connection_string")
- // Username for accessing the database.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'readonly_user', 'reporting_user'
- DBUserKey = attribute.Key("db.user")
- // The fully-qualified class name of the [Java Database Connectivity
- // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
- // used to connect.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'org.postgresql.Driver',
- // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
- DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
- // This attribute is used to report the name of the database being accessed. For
- // commands that switch the database, this should be set to the target database
- // (even if the command fails).
- //
- // Type: string
- // Required: Required, if applicable.
- // Stability: stable
- // Examples: 'customers', 'main'
- // Note: In some SQL databases, the database name to be used is called "schema
- // name". In case there are multiple layers that could be considered for database
- // name (e.g. Oracle instance name and schema name), the database name to be used
- // is the more specific layer (e.g. Oracle schema name).
- DBNameKey = attribute.Key("db.name")
- // The database statement being executed.
- //
- // Type: string
- // Required: Required if applicable and not explicitly disabled via
- // instrumentation configuration.
- // Stability: stable
- // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
- // Note: The value may be sanitized to exclude sensitive information.
- DBStatementKey = attribute.Key("db.statement")
- // The name of the operation being executed, e.g. the [MongoDB command
- // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
- // such as `findAndModify`, or the SQL keyword.
- //
- // Type: string
- // Required: Required, if `db.statement` is not applicable.
- // Stability: stable
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: When setting this to an SQL keyword, it is not recommended to attempt any
- // client-side parsing of `db.statement` just to get this property, but it should
- // be set if the operation name is provided by the library being instrumented. If
- // the SQL statement has an ambiguous operation, or performs more than one
- // operation, this value may be omitted.
- DBOperationKey = attribute.Key("db.operation")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
-)
-
-// Connection-level attributes for Microsoft SQL Server
-const (
- // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-
- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
- // connecting to. This name is used to determine the port of a named instance.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'MSSQLSERVER'
- // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer
- // required (but still recommended if non-standard).
- DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
-)
-
-// Call-level attributes for Cassandra
-const (
- // The fetch size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
- // The consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-
- // oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
- // The name of the primary table that the operation is acting upon, including the
- // keyspace name (if applicable).
- //
- // Type: string
- // Required: Recommended if available.
- // Stability: stable
- // Examples: 'mytable'
- // Note: This mirrors the db.sql.table attribute but references cassandra rather
- // than sql. It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting upon an
- // anonymous table, or more than one table, this value MUST NOT be set.
- DBCassandraTableKey = attribute.Key("db.cassandra.table")
- // Whether or not the query is idempotent.
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
- // The number of times a query was speculatively executed. Not set or `0` if the
- // query was not executed speculatively.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
- // The ID of the coordinating node for a query.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
- // The data center of the coordinating node for a query.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// Call-level attributes for Redis
-const (
- // The index of the database being accessed as used in the [`SELECT`
- // command](https://redis.io/commands/select), provided as an integer. To be used
- // instead of the generic `db.name` attribute.
- //
- // Type: int
- // Required: Required, if other than the default database (`0`).
- // Stability: stable
- // Examples: 0, 1, 15
- DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
-)
-
-// Call-level attributes for MongoDB
-const (
- // The collection being accessed within the database stated in `db.name`.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'customers', 'products'
- DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
-)
-
-// Call-level attributes for SQL databases
-const (
- // The name of the primary table that the operation is acting upon, including the
- // database name (if applicable).
- //
- // Type: string
- // Required: Recommended if available.
- // Stability: stable
- // Examples: 'public.users', 'customers'
- // Note: It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting upon an
- // anonymous table, or more than one table, this value MUST NOT be set.
- DBSQLTableKey = attribute.Key("db.sql.table")
-)
-
-// This document defines the attributes used to report a single exception associated with a span.
-const (
- // The type of the exception (its fully-qualified class name, if applicable). The
- // dynamic type of the exception should be preferred over the static type in
- // languages that support it.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
- // The exception message.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
- // A stacktrace as a string in the natural representation for the language
- // runtime. The representation is to be determined and documented by each language
- // SIG.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
- // SHOULD be set to true if the exception event is recorded at a point where it is
- // known that the exception is escaping the scope of the span.
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of a span,
- // if that span is ended while the exception is still logically "in flight".
- // This may be actually "in flight" in some languages (e.g. if the exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most languages.
-
- // It is usually not possible to determine at the point where an exception is
- // thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending the span,
- // as done in the [example above](#recording-an-exception).
-
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-)
-
-// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans.
-const (
- // Type of the trigger which caused this function execution.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Note: For the server/consumer span on the incoming side,
- // `faas.trigger` MUST be set.
-
- // Clients invoking FaaS instances usually cannot set `faas.trigger`,
- // since they would typically need to look in the payload to determine
- // the event type. If clients set it, it should be the same as the
- // trigger that corresponding incoming would have (i.e., this has
- // nothing to do with the underlying transport used to make the API
- // call to invoke the lambda, which is often HTTP).
- FaaSTriggerKey = attribute.Key("faas.trigger")
- // The execution ID of the current function execution.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSExecutionKey = attribute.Key("faas.execution")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write.
-const (
- // The name of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos
- // DB to the database name.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
- // Describes the type of the operation that was performed on the data.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
- // A string containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
- // in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
- // The document name/table subjected to the operation. For example, in Cloud
- // Storage or S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// Semantic Convention for FaaS scheduled to be executed regularly.
-const (
- // A string containing the function invocation time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
- // in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
- // A string containing the schedule period as [Cron Expression](https://docs.oracl
- // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-)
-
-// Contains additional attributes for incoming FaaS spans.
-const (
- // A boolean that is true if the serverless function is executed for the first
- // time (aka cold-start).
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-)
-
-// Contains additional attributes for outgoing FaaS spans.
-const (
- // The name of the invoked function.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
- // function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
- // The cloud provider of the invoked function.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked
- // function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
- // The cloud region of the invoked function.
- //
- // Type: string
- // Required: For some cloud providers, like AWS or GCP, the region in which a
- // function is hosted is essential to uniquely identify the function and also part
- // of its endpoint. Since it's part of the endpoint being called, the region is
- // always known to clients. In these cases, `faas.invoked_region` MUST be set
- // accordingly. If the region is unknown to the client or not required for
- // identifying the invoked function, setting `faas.invoked_region` is optional.
- // Stability: stable
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
- // function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-// These attributes may be used for any network related operation.
-const (
- // Transport protocol used. See note below.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- NetTransportKey = attribute.Key("net.transport")
- // Remote address of the peer (dotted decimal for IPv4 or
- // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6)
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '127.0.0.1'
- NetPeerIPKey = attribute.Key("net.peer.ip")
- // Remote port number.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 80, 8080, 443
- NetPeerPortKey = attribute.Key("net.peer.port")
- // Remote hostname or similar, see note below.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'example.com'
- NetPeerNameKey = attribute.Key("net.peer.name")
- // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '192.168.0.1'
- NetHostIPKey = attribute.Key("net.host.ip")
- // Like `net.peer.port` but for the host port.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 35555
- NetHostPortKey = attribute.Key("net.host.port")
- // Local hostname or similar, see note below.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'localhost'
- NetHostNameKey = attribute.Key("net.host.name")
- // The internet connection type currently being used by the host.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Examples: 'wifi'
- NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
- // This describes more details regarding the connection.type. It may be the type
- // of cell technology connection, but it could be used for describing details
- // about a wifi connection.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Examples: 'LTE'
- NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
- // The name of the mobile carrier.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'sprint'
- NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
- // The mobile carrier country code.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '310'
- NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
- // The mobile carrier network code.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '001'
- NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
- // The ISO 3166-1 alpha-2 2-character country code associated with the mobile
- // carrier network.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'DE'
- NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
-)
-
-var (
- // ip_tcp
- NetTransportTCP = NetTransportKey.String("ip_tcp")
- // ip_udp
- NetTransportUDP = NetTransportKey.String("ip_udp")
- // Another IP-based protocol
- NetTransportIP = NetTransportKey.String("ip")
- // Unix Domain socket. See below
- NetTransportUnix = NetTransportKey.String("unix")
- // Named or anonymous pipe. See note below
- NetTransportPipe = NetTransportKey.String("pipe")
- // In-process communication
- NetTransportInProc = NetTransportKey.String("inproc")
- // Something else (non IP-based)
- NetTransportOther = NetTransportKey.String("other")
-)
-
-var (
- // wifi
- NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
- // wired
- NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
- // cell
- NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
- // unavailable
- NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
- // unknown
- NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
-)
-
-var (
- // GPRS
- NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
- // EDGE
- NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
- // UMTS
- NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
- // CDMA
- NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
- // IDEN
- NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
- // EHRPD
- NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
- // GSM
- NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
-)
-
-// Operations that access some remote service.
-const (
- // The [`service.name`](../../resource/semantic_conventions/README.md#service) of
- // the remote service. SHOULD be equal to the actual `service.name` resource
- // attribute of the remote service if any.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// These attributes may be used for any operation with an authenticated and/or authorized enduser.
-const (
- // Username or client_id extracted from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the
- // inbound request from outside the system.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
- // Actual/assumed role the client is making the request under extracted from token
- // or application security context.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
- // Scopes or granted authorities the client currently possesses extracted from
- // token or application security context. The value would come from the scope
- // associated with an [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value
- // in a [SAML 2.0 Assertion](http://docs.oasis-
- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// These attributes may be used for any operation to store information about a thread that started a span.
-const (
- // Current "managed" thread ID (as opposed to OS thread ID).
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
- // Current thread name.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// These attributes allow to report this unit of code and therefore to provide more context about the span.
-const (
- // The method or function name, or equivalent (usually rightmost part of the code
- // unit's name).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
- // The "namespace" within which `code.function` is defined. Usually the qualified
- // class or module name, such that `code.namespace` + some separator +
- // `code.function` form a unique identifier for the code unit.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
- // The source code file name that identifies the code unit as uniquely as possible
- // (preferably an absolute file path).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
- // The line number in `code.filepath` best representing the operation. It SHOULD
- // point within the code unit named in `code.function`.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-)
-
-// This document defines semantic conventions for HTTP client and server Spans.
-const (
- // HTTP request method.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- HTTPMethodKey = attribute.Key("http.method")
- // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`.
- // Usually the fragment is not transmitted over HTTP, but if it is known, it
- // should be included nevertheless.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
- // Note: `http.url` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case the attribute's
- // value should be `https://www.example.com/`.
- HTTPURLKey = attribute.Key("http.url")
- // The full request target as passed in a HTTP request line or equivalent.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '/path/12314/?q=ddds#123'
- HTTPTargetKey = attribute.Key("http.target")
- // The value of the [HTTP host
- // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header
- // should also be reported, see note.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'www.example.org'
- // Note: When the header is present but empty the attribute SHOULD be set to the
- // empty string. Note that this is a valid situation that is expected in certain
- // cases, according the aforementioned [section of RFC
- // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not
- // set the attribute MUST NOT be set.
- HTTPHostKey = attribute.Key("http.host")
- // The URI scheme identifying the used protocol.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'http', 'https'
- HTTPSchemeKey = attribute.Key("http.scheme")
- // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // Required: If and only if one was received/sent.
- // Stability: stable
- // Examples: 200
- HTTPStatusCodeKey = attribute.Key("http.status_code")
- // Kind of HTTP protocol used.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP`
- // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed.
- HTTPFlavorKey = attribute.Key("http.flavor")
- // Value of the [HTTP User-
- // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the
- // client.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
- HTTPUserAgentKey = attribute.Key("http.user_agent")
- // The size of the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as the
- // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
- // requests using transport encoding, this should be the compressed size.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 3495
- HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
- // The size of the uncompressed request payload body after transport decoding. Not
- // set if transport encoding not used.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 5493
- HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed")
- // The size of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as the
- // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
- // requests using transport encoding, this should be the compressed size.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 3495
- HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
- // The size of the uncompressed response payload body after transport decoding.
- // Not set if transport encoding not used.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 5493
- HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed")
- // The ordinal number of request re-sending attempt.
- //
- // Type: int
- // Required: If and only if a request was retried.
- // Stability: stable
- // Examples: 3
- HTTPRetryCountKey = attribute.Key("http.retry_count")
-)
-
-var (
- // HTTP 1.0
- HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
- // HTTP 1.1
- HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
- // HTTP 2
- HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
- // SPDY protocol
- HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
- // QUIC protocol
- HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
-)
-
-// Semantic Convention for HTTP Server
-const (
- // The primary server name of the matched virtual host. This should be obtained
- // via configuration. If no such configuration can be obtained, this attribute
- // MUST NOT be set ( `net.host.name` should be used instead).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'example.com'
- // Note: `http.url` is usually not readily available on the server side but would
- // have to be assembled in a cumbersome and sometimes lossy process from other
- // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus
- // preferred to supply the raw data that is available.
- HTTPServerNameKey = attribute.Key("http.server_name")
- // The matched route (path template).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '/users/:userID?'
- HTTPRouteKey = attribute.Key("http.route")
- // The IP address of the original client behind all proxies, if known (e.g. from
- // [X-Forwarded-For](https://developer.mozilla.org/en-
- // US/docs/Web/HTTP/Headers/X-Forwarded-For)).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '83.164.160.102'
- // Note: This is not necessarily the same as `net.peer.ip`, which would
- // identify the network-level peer, which may be a proxy.
-
- // This attribute should be set when a source of information different
- // from the one used for `net.peer.ip`, is available even if that other
- // source just confirms the same value as `net.peer.ip`.
- // Rationale: For `net.peer.ip`, one typically does not know if it
- // comes from a proxy, reverse proxy, or the actual client. Setting
- // `http.client_ip` when it's the same as `net.peer.ip` means that
- // one is at least somewhat confident that the address is not that of
- // the closest proxy.
- HTTPClientIPKey = attribute.Key("http.client_ip")
-)
-
-// Attributes that exist for multiple DynamoDB request types.
-const (
- // The keys in the `RequestItems` object field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
- // The JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : {
- // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
- // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
- // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
- // "string", "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
- // The JSON-serialized value of the `ItemCollectionMetrics` response field.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
- // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
- // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
- // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
- // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
- //
- // Type: double
- // Required: No
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
- // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // Required: No
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
- // The value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
- // The value of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems,
- // ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
- // The value of the `Limit` request parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
- // The value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
- // The value of the `IndexName` request parameter.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
- // The value of the `Select` request parameter.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-)
-
-// DynamoDB.CreateTable
-const (
- // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request
- // field
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string",
- // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
- // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits":
- // number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
- // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request
- // field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes":
- // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
- // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
- // "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// DynamoDB.ListTables
-const (
- // The value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
- // The number of items in the `TableNames` response parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// DynamoDB.Query
-const (
- // The value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-)
-
-// DynamoDB.Scan
-const (
- // The value of the `Segment` request parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
- // The value of the `TotalSegments` request parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
- // The value of the `Count` response parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
- // The value of the `ScannedCount` response parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-)
-
-// DynamoDB.UpdateTable
-const (
- // The JSON-serialized value of each item in the `AttributeDefinitions` request
- // field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
- // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates`
- // request field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
- // number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// This document defines the attributes used in messaging systems.
-const (
- // A string identifying the messaging system.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
- MessagingSystemKey = attribute.Key("messaging.system")
- // The message destination name. This might be equal to the span name but is
- // required nevertheless.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- MessagingDestinationKey = attribute.Key("messaging.destination")
- // The kind of message destination
- //
- // Type: Enum
- // Required: Required only if the message destination is either a `queue` or
- // `topic`.
- // Stability: stable
- MessagingDestinationKindKey = attribute.Key("messaging.destination_kind")
- // A boolean that is true if the message destination is temporary.
- //
- // Type: boolean
- // Required: If missing, it is assumed to be false.
- // Stability: stable
- MessagingTempDestinationKey = attribute.Key("messaging.temp_destination")
- // The name of the transport protocol.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'AMQP', 'MQTT'
- MessagingProtocolKey = attribute.Key("messaging.protocol")
- // The version of the transport protocol.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '0.9.1'
- MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version")
- // Connection string.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'tibjmsnaming://localhost:7222',
- // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue'
- MessagingURLKey = attribute.Key("messaging.url")
- // A value used by the messaging system as an identifier for the message,
- // represented as a string.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message_id")
- // The [conversation ID](#conversations) identifying the conversation to which the
- // message belongs, represented as a string. Sometimes called "Correlation ID".
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'MyConversationID'
- MessagingConversationIDKey = attribute.Key("messaging.conversation_id")
- // The (uncompressed) size of the message payload in bytes. Also use this
- // attribute if it is unknown whether the compressed or uncompressed payload size
- // is reported.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 2738
- MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes")
- // The compressed size of the message payload in bytes.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 2048
- MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes")
-)
-
-var (
- // A message sent to a queue
- MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
- // A message sent to a topic
- MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
-)
-
-// Semantic convention for a consumer of messages received from a messaging system
-const (
- // A string identifying the kind of message consumption as defined in the
- // [Operation names](#operation-names) section above. If the operation is "send",
- // this attribute MUST NOT be set, since the operation can be inferred from the
- // span kind in that case.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- MessagingOperationKey = attribute.Key("messaging.operation")
- // The identifier for the consumer receiving a message. For Kafka, set it to
- // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are
- // present, or only `messaging.kafka.consumer_group`. For brokers, such as
- // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
- // message.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'mygroup - client-6'
- MessagingConsumerIDKey = attribute.Key("messaging.consumer_id")
-)
-
-var (
- // receive
- MessagingOperationReceive = MessagingOperationKey.String("receive")
- // process
- MessagingOperationProcess = MessagingOperationKey.String("process")
-)
-
-// Attributes for RabbitMQ
-const (
- // RabbitMQ message routing key.
- //
- // Type: string
- // Required: Unless it is empty.
- // Stability: stable
- // Examples: 'myKey'
- MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key")
-)
-
-// Attributes for Apache Kafka
-const (
- // Message keys in Kafka are used for grouping alike messages to ensure they're
- // processed on the same partition. They differ from `messaging.message_id` in
- // that they're not unique. If the key is `null`, the attribute MUST NOT be set.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'myKey'
- // Note: If the key type is not string, it's string representation has to be
- // supplied for the attribute. If the key has no unambiguous, canonical string
- // form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key")
- // Name of the Kafka Consumer Group that is handling the message. Only applies to
- // consumers, not producers.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group")
- // Client ID for the Consumer or Producer that is handling the message.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'client-5'
- MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
- // Partition the message is sent to.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 2
- MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition")
- // A boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // Required: If missing, it is assumed to be false.
- // Stability: stable
- MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone")
-)
-
-// Attributes for Apache RocketMQ
-const (
- // Namespace of RocketMQ resources, resources in different namespaces are
- // individual.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
- // Name of the RocketMQ producer/consumer group that is handling the message. The
- // client type is identified by the SpanKind.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
- // The unique identifier for each client.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'myhost@8742@s8083jm'
- MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
- // Type of message.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type")
- // The secondary classifier of message besides topic.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag")
- // Key(s) of message, another way to mark message besides message id.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys")
- // Model of message consumption. This only applies to consumer spans.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-// This document defines semantic conventions for remote procedure calls.
-const (
- // A string identifying the remoting system. See below for a list of well-known
- // identifiers.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- RPCSystemKey = attribute.Key("rpc.system")
- // The full (logical) name of the service being called, including its package
- // name, if applicable.
- //
- // Type: string
- // Required: No, but recommended
- // Stability: stable
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing class.
- // The `code.namespace` attribute may be used to store the latter (despite the
- // attribute name, it may include a class name; e.g., class with method actually
- // executing the call on the server side, RPC client stub class on the client
- // side).
- RPCServiceKey = attribute.Key("rpc.service")
- // The name of the (logical) method being called, must be equal to the $method
- // part in the span name.
- //
- // Type: string
- // Required: No, but recommended
- // Stability: stable
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the latter
- // (e.g., method actually executing the call on the server side, RPC client stub
- // method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
-)
-
-// Tech-specific attributes for gRPC.
-const (
- // The [numeric status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC
- // request.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
-const (
- // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC
- // 1.0 does not specify this, the value can be omitted.
- //
- // Type: string
- // Required: If missing, it is assumed to be "1.0".
- // Stability: stable
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
- // `id` property of request or response. Since protocol allows id to be int,
- // string, `null` or missing (for notifications), value is expected to be cast to
- // string for simplicity. Use empty string in case of `null` value. Omit entirely
- // if this is a notification.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // Required: If missing, response is assumed to be successful.
- // Stability: stable
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-)
-
-// RPC received/sent message.
-const (
- // Whether this is a received or sent message.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- MessageTypeKey = attribute.Key("message.type")
- // MUST be calculated as two different counters starting from `1` one for sent
- // messages and one for received message.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- MessageIDKey = attribute.Key("message.id")
- // Compressed size of the message in bytes.
- //
- // Type: int
- // Required: No
- // Stability: stable
- MessageCompressedSizeKey = attribute.Key("message.compressed_size")
- // Uncompressed size of the message in bytes.
- //
- // Type: int
- // Required: No
- // Stability: stable
- MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
-)
-
-var (
- // sent
- MessageTypeSent = MessageTypeKey.String("SENT")
- // received
- MessageTypeReceived = MessageTypeKey.String("RECEIVED")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/README.md
deleted file mode 100644
index 6a273180f..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.12.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.12.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.12.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go
deleted file mode 100644
index fc255ef05..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconv implements OpenTelemetry semantic conventions.
-//
-// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the v1.12.0 version of the OpenTelemetry specification.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go
deleted file mode 100644
index f0e12957e..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
-
-const (
- // ExceptionEventName is the name of the Span event representing an exception.
- ExceptionEventName = "exception"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go
deleted file mode 100644
index 4e19ca342..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
-
-import (
- "net/http"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/semconv/internal"
- "go.opentelemetry.io/otel/trace"
-)
-
-// HTTP scheme attributes.
-var (
- HTTPSchemeHTTP = HTTPSchemeKey.String("http")
- HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
-)
-
-var sc = &internal.SemanticConventions{
- EnduserIDKey: EnduserIDKey,
- HTTPClientIPKey: HTTPClientIPKey,
- HTTPFlavorKey: HTTPFlavorKey,
- HTTPHostKey: HTTPHostKey,
- HTTPMethodKey: HTTPMethodKey,
- HTTPRequestContentLengthKey: HTTPRequestContentLengthKey,
- HTTPRouteKey: HTTPRouteKey,
- HTTPSchemeHTTP: HTTPSchemeHTTP,
- HTTPSchemeHTTPS: HTTPSchemeHTTPS,
- HTTPServerNameKey: HTTPServerNameKey,
- HTTPStatusCodeKey: HTTPStatusCodeKey,
- HTTPTargetKey: HTTPTargetKey,
- HTTPURLKey: HTTPURLKey,
- HTTPUserAgentKey: HTTPUserAgentKey,
- NetHostIPKey: NetHostIPKey,
- NetHostNameKey: NetHostNameKey,
- NetHostPortKey: NetHostPortKey,
- NetPeerIPKey: NetPeerIPKey,
- NetPeerNameKey: NetPeerNameKey,
- NetPeerPortKey: NetPeerPortKey,
- NetTransportIP: NetTransportIP,
- NetTransportOther: NetTransportOther,
- NetTransportTCP: NetTransportTCP,
- NetTransportUDP: NetTransportUDP,
- NetTransportUnix: NetTransportUnix,
-}
-
-// NetAttributesFromHTTPRequest generates attributes of the net
-// namespace as specified by the OpenTelemetry specification for a
-// span. The network parameter is a string that net.Dial function
-// from standard library can understand.
-func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
- return sc.NetAttributesFromHTTPRequest(network, request)
-}
-
-// EndUserAttributesFromHTTPRequest generates attributes of the
-// enduser namespace as specified by the OpenTelemetry specification
-// for a span.
-func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
- return sc.EndUserAttributesFromHTTPRequest(request)
-}
-
-// HTTPClientAttributesFromHTTPRequest generates attributes of the
-// http namespace as specified by the OpenTelemetry specification for
-// a span on the client side.
-func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
- return sc.HTTPClientAttributesFromHTTPRequest(request)
-}
-
-// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes
-// to be used with server-side HTTP metrics.
-func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue {
- return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request)
-}
-
-// HTTPServerAttributesFromHTTPRequest generates attributes of the
-// http namespace as specified by the OpenTelemetry specification for
-// a span on the server side. Currently, only basic authentication is
-// supported.
-func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
- return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request)
-}
-
-// HTTPAttributesFromHTTPStatusCode generates attributes of the http
-// namespace as specified by the OpenTelemetry specification for a
-// span.
-func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
- return sc.HTTPAttributesFromHTTPStatusCode(code)
-}
-
-// SpanStatusFromHTTPStatusCode generates a status code and a message
-// as specified by the OpenTelemetry specification for a span.
-func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
- return internal.SpanStatusFromHTTPStatusCode(code)
-}
-
-// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message
-// as specified by the OpenTelemetry specification for a span.
-// Exclude 4xx for SERVER to set the appropriate status.
-func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) {
- return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go
deleted file mode 100644
index 45951685a..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go
+++ /dev/null
@@ -1,1031 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device).
-const (
- // Array of brand name and version separated by a space
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (navigator.userAgentData.brands).
- BrowserBrandsKey = attribute.Key("browser.brands")
- // The platform on which the browser is running
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Windows', 'macOS', 'Android'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (navigator.userAgentData.platform). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD
- // be left unset in order for the values to be consistent.
- // The list of possible values is defined in the [W3C User-Agent Client Hints
- // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
- // Note that some (but not all) of these values can overlap with values in the
- // [os.type and os.name attributes](./os.md). However, for consistency, the values
- // in the `browser.platform` attribute should capture the exact value that the
- // user agent provides.
- BrowserPlatformKey = attribute.Key("browser.platform")
- // Full user-agent string provided by the browser
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36
- // (KHTML, '
- // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36'
- // Note: The user-agent value SHOULD be provided only from browsers that do not
- // have a mechanism to retrieve brands and platform individually from the User-
- // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent`
- // API can be used.
- BrowserUserAgentKey = attribute.Key("browser.user_agent")
-)
-
-// A cloud environment (e.g. GCP, Azure, AWS)
-const (
- // Name of the cloud provider.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- CloudProviderKey = attribute.Key("cloud.provider")
- // The cloud account ID the resource is assigned to.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
- // The geographical region the resource is running.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for example
- // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-
- // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-
- // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-
- // us/global-infrastructure/geographies/), [Google Cloud
- // regions](https://cloud.google.com/about/locations), or [Tencent Cloud
- // regions](https://intl.cloud.tencent.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
- // Cloud regions often have multiple, isolated locations known as zones to
- // increase availability. Availability zone represents the zone where the resource
- // is running.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
- // The cloud platform in use.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-// Resources used by AWS Elastic Container Service (ECS).
-const (
- // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.
- // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-
- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
- // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo
- // perguide/clusters.html).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
- // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l
- // aunch_types.html) for an ECS task.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
- // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates
- // t/developerguide/task_definitions.html).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-
- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
- // The task definition family this task definition is a member of.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
- // The revision for this task definition.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// Resources used by AWS Elastic Kubernetes Service (EKS).
-const (
- // The ARN of an EKS cluster.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// Resources specific to Amazon Web Services.
-const (
- // The name(s) of the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like multi-container
- // applications, where a single application has sidecar containers, and each write
- // to their own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
- // The Amazon Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
- // access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
- // The name(s) of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
- // The ARN(s) of the AWS log stream(s).
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-
- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain
- // several log streams, so these ARNs necessarily identify both a log group and a
- // log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-)
-
-// A container instance.
-const (
- // Container name used by container runtime.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
- // Container ID. Usually a UUID, as for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-
- // identification). The UUID might be abbreviated.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
- // The container runtime managing this container.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
- // Name of the image the container was built on.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
- // Container image tag.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '0.1'
- ContainerImageTagKey = attribute.Key("container.image.tag")
-)
-
-// The software deployment.
-const (
- // Name of the [deployment
- // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'staging', 'production'
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// The device on which the process represented by this resource is running.
-const (
- // A unique identifier representing the device
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values outlined
- // below. This value is not an advertising identifier and MUST NOT be used as
- // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id
- // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden
- // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the
- // Firebase Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on best
- // practices and exact implementation details. Caution should be taken when
- // storing personal data or anything which can identify a user. GDPR and data
- // protection laws may apply, ensure you do your own due diligence.
- DeviceIDKey = attribute.Key("device.id")
- // The model identifier for the device
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine readable version of the
- // model identifier rather than the market or consumer-friendly name of the
- // device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
- // The marketing name for the device model
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human readable version of the
- // device model rather than a machine readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
- // The name of the device manufacturer
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-)
-
-// A serverless instance.
-const (
- // The name of the single function that this runtime instance executes.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-
- // general.md#source-code-attributes)
- // span attributes).
-
- // For some cloud providers, the above definition is ambiguous. The following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud providers/products:
-
- // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `faas.id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
- // The unique ID of the single function that this runtime instance executes.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
- // Note: On some cloud providers, it may not be possible to determine the full ID
- // at startup,
- // so consider setting `faas.id` as a span attribute instead.
-
- // The exact value to use for `faas.id` depends on the cloud provider:
-
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-
- // namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
- // aliases.html)
- // with the resolved function version, as the same runtime instance may be
- // invokable with
- // multiple different aliases.
- // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-
- // resource-names)
- // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-
- // us/rest/api/resources/resources/get-by-id) of the invoked function,
- // *not* the function app, having the form
- // `/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.We
- // b/sites/<FUNCAPP>/functions/<FUNC>`.
- // This means that a span attribute MUST be used, as an Azure function app can
- // host multiple functions that would usually share
- // a TracerProvider.
- FaaSIDKey = attribute.Key("faas.id")
- // The immutable version of the function being executed.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
-
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
- // versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run:** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-
- // var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
- // The execution environment ID as a string, that will be potentially reused for
- // other invocations to the same function/function version.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
- // The amount of memory available to the serverless function in MiB.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 128
- // Note: It's recommended to set this attribute since e.g. too little memory can
- // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
- // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
- // information.
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-)
-
-// A host is defined as a general computing instance.
-const (
- // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud
- // provider.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-test'
- HostIDKey = attribute.Key("host.id")
- // Name of the host. On Unix systems, it may contain what the hostname command
- // returns, or the fully qualified hostname, or another name specified by the
- // user.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
- // Type of host. For Cloud, this must be the machine type.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
- // The CPU architecture the host system is running on.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- HostArchKey = attribute.Key("host.arch")
- // Name of the VM image or OS install the host was instantiated from.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
- // VM image ID. For Cloud, this value is from the provider.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
- // The version string of the VM image as defined in [Version
- // Attributes](README.md#version-attributes).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
-
-// A Kubernetes Cluster.
-const (
- // The name of the cluster.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-)
-
-// A Kubernetes Node object.
-const (
- // The name of the Node.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
- // The UID of the Node.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-)
-
-// A Kubernetes Namespace.
-const (
- // The name of the namespace that the pod is running in.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-)
-
-// A Kubernetes Pod object.
-const (
- // The UID of the Pod.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
- // The name of the Pod.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-)
-
-// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
-const (
- // The name of the Container from Pod specification, must be unique within a Pod.
- // Container runtime usually uses different globally unique name
- // (`container.name`).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
- // Number of times the container was restarted. This attribute can be used to
- // identify a particular container (running or stopped) within a container spec.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 0, 2
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-)
-
-// A Kubernetes ReplicaSet object.
-const (
- // The UID of the ReplicaSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
- // The name of the ReplicaSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-)
-
-// A Kubernetes Deployment object.
-const (
- // The UID of the Deployment.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
- // The name of the Deployment.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-)
-
-// A Kubernetes StatefulSet object.
-const (
- // The UID of the StatefulSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
- // The name of the StatefulSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-)
-
-// A Kubernetes DaemonSet object.
-const (
- // The UID of the DaemonSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
- // The name of the DaemonSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-)
-
-// A Kubernetes Job object.
-const (
- // The UID of the Job.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
- // The name of the Job.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-)
-
-// A Kubernetes CronJob object.
-const (
- // The UID of the CronJob.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
- // The name of the CronJob.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-)
-
-// The operating system (OS) on which the process represented by this resource is running.
-const (
- // The operating system type.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- OSTypeKey = attribute.Key("os.type")
- // Human readable (not intended to be parsed) OS version information, like e.g.
- // reported by `ver` or `lsb_release -a` commands.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS'
- OSDescriptionKey = attribute.Key("os.description")
- // Human readable operating system name.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
- // The version string of the operating system as defined in [Version
- // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// An operating system process.
-const (
- // Process identifier (PID).
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
- // The name of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
- // The full path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
- // The command used to launch the process (i.e. the command name). On Linux based
- // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows,
- // can be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
- // The full command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not
- // set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
- // All the command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited strings
- // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be
- // the full argv vector passed to `main`.
- //
- // Type: string[]
- // Required: See below
- // Stability: stable
- // Examples: 'cmd/otecol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
- // The username of the user that owns the process.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-)
-
-// The single (language) runtime instance which is monitored.
-const (
- // The name of the runtime of this process. For compiled native binaries, this
- // SHOULD be the name of the compiler.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
- // The version of the runtime of this process, as returned by the runtime without
- // modification.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
- // An additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-)
-
-// A service instance.
-const (
- // Logical name of the service.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled services. If
- // the value was not specified, SDKs MUST fallback to `unknown_service:`
- // concatenated with [`process.executable.name`](process.md#process), e.g.
- // `unknown_service:bash`. If `process.executable.name` is not available, the
- // value MUST be set to `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
- // A namespace for `service.name`.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group of
- // services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name` is
- // expected to be unique for all services that have no explicit namespace defined
- // (so the empty/unspecified namespace is simply one more valid namespace). Zero-
- // length namespace string is assumed equal to unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
- // The string ID of the service instance.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words
- // `service.namespace,service.name,service.instance.id` triplet MUST be globally
- // unique). The ID helps to distinguish instances of the same service that exist
- // at the same time (e.g. instances of a horizontally scaled service). It is
- // preferable for the ID to be persistent and stay the same for the lifetime of
- // the service instance, however it is acceptable that the ID is ephemeral and
- // changes during important lifetime events for the service (e.g. service
- // restarts). If the service has no inherent unique ID that can be used as the
- // value of this attribute it is recommended to generate a random Version 1 or
- // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use
- // Version 5, see RFC 4122 for more recommendations).
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
- // The version string of the service API or implementation.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '2.0.0'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// The telemetry SDK used to capture data recorded by the instrumentation libraries.
-const (
- // The name of the telemetry SDK as defined above.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
- // The language of the telemetry SDK.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
- // The version string of the telemetry SDK.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
- // The version string of the auto instrumentation agent, if used.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '1.2.3'
- TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
-)
-
-// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime.
-const (
- // The name of the web engine.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
- // The version of the web engine.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
- // Additional description of the web engine (e.g. detailed version and edition
- // information).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go
deleted file mode 100644
index f01d515bc..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
-const SchemaURL = "https://opentelemetry.io/schemas/1.12.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go
deleted file mode 100644
index 70c25dc21..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go
+++ /dev/null
@@ -1,1693 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Span attributes used by AWS Lambda (in addition to general `faas` attributes).
-const (
- // The full invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next`
- // applicable).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `faas.id` if an alias is involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used.
-const (
- // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec
- // .md#id) uniquely identifies the event.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
- // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m
- // d#source-1) identifies the context in which an event happened.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my-
- // service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
- // The [version of the CloudEvents specification](https://github.com/cloudevents/s
- // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
- // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp
- // ec.md#type) contains a value describing the type of event related to the
- // originating occurrence.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
- // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.
- // md#subject) of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-)
-
-// This document defines semantic conventions for the OpenTracing Shim
-const (
- // Parent-child Reference type
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span does not depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// This document defines the attributes used to perform database client calls.
-const (
- // An identifier for the database management system (DBMS) product being used. See
- // below for a list of well-known identifiers.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- DBSystemKey = attribute.Key("db.system")
- // The connection string used to connect to the database. It is recommended to
- // remove embedded credentials.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
- DBConnectionStringKey = attribute.Key("db.connection_string")
- // Username for accessing the database.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'readonly_user', 'reporting_user'
- DBUserKey = attribute.Key("db.user")
- // The fully-qualified class name of the [Java Database Connectivity
- // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
- // used to connect.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'org.postgresql.Driver',
- // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
- DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
- // This attribute is used to report the name of the database being accessed. For
- // commands that switch the database, this should be set to the target database
- // (even if the command fails).
- //
- // Type: string
- // Required: Required, if applicable.
- // Stability: stable
- // Examples: 'customers', 'main'
- // Note: In some SQL databases, the database name to be used is called "schema
- // name". In case there are multiple layers that could be considered for database
- // name (e.g. Oracle instance name and schema name), the database name to be used
- // is the more specific layer (e.g. Oracle schema name).
- DBNameKey = attribute.Key("db.name")
- // The database statement being executed.
- //
- // Type: string
- // Required: Required if applicable and not explicitly disabled via
- // instrumentation configuration.
- // Stability: stable
- // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
- // Note: The value may be sanitized to exclude sensitive information.
- DBStatementKey = attribute.Key("db.statement")
- // The name of the operation being executed, e.g. the [MongoDB command
- // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
- // such as `findAndModify`, or the SQL keyword.
- //
- // Type: string
- // Required: Required, if `db.statement` is not applicable.
- // Stability: stable
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: When setting this to an SQL keyword, it is not recommended to attempt any
- // client-side parsing of `db.statement` just to get this property, but it should
- // be set if the operation name is provided by the library being instrumented. If
- // the SQL statement has an ambiguous operation, or performs more than one
- // operation, this value may be omitted.
- DBOperationKey = attribute.Key("db.operation")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
-)
-
-// Connection-level attributes for Microsoft SQL Server
-const (
- // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-
- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
- // connecting to. This name is used to determine the port of a named instance.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'MSSQLSERVER'
- // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer
- // required (but still recommended if non-standard).
- DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
-)
-
-// Call-level attributes for Cassandra
-const (
- // The fetch size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
- // The consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-
- // oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
- // The name of the primary table that the operation is acting upon, including the
- // keyspace name (if applicable).
- //
- // Type: string
- // Required: Recommended if available.
- // Stability: stable
- // Examples: 'mytable'
- // Note: This mirrors the db.sql.table attribute but references cassandra rather
- // than sql. It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting upon an
- // anonymous table, or more than one table, this value MUST NOT be set.
- DBCassandraTableKey = attribute.Key("db.cassandra.table")
- // Whether or not the query is idempotent.
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
- // The number of times a query was speculatively executed. Not set or `0` if the
- // query was not executed speculatively.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
- // The ID of the coordinating node for a query.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
- // The data center of the coordinating node for a query.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// Call-level attributes for Redis
-const (
- // The index of the database being accessed as used in the [`SELECT`
- // command](https://redis.io/commands/select), provided as an integer. To be used
- // instead of the generic `db.name` attribute.
- //
- // Type: int
- // Required: Required, if other than the default database (`0`).
- // Stability: stable
- // Examples: 0, 1, 15
- DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
-)
-
-// Call-level attributes for MongoDB
-const (
- // The collection being accessed within the database stated in `db.name`.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'customers', 'products'
- DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
-)
-
-// Call-level attributes for SQL databases
-const (
- // The name of the primary table that the operation is acting upon, including the
- // database name (if applicable).
- //
- // Type: string
- // Required: Recommended if available.
- // Stability: stable
- // Examples: 'public.users', 'customers'
- // Note: It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting upon an
- // anonymous table, or more than one table, this value MUST NOT be set.
- DBSQLTableKey = attribute.Key("db.sql.table")
-)
-
-// This document defines the attributes used to report a single exception associated with a span.
-const (
- // The type of the exception (its fully-qualified class name, if applicable). The
- // dynamic type of the exception should be preferred over the static type in
- // languages that support it.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
- // The exception message.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
- // A stacktrace as a string in the natural representation for the language
- // runtime. The representation is to be determined and documented by each language
- // SIG.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
- // SHOULD be set to true if the exception event is recorded at a point where it is
- // known that the exception is escaping the scope of the span.
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of a span,
- // if that span is ended while the exception is still logically "in flight".
- // This may be actually "in flight" in some languages (e.g. if the exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most languages.
-
- // It is usually not possible to determine at the point where an exception is
- // thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending the span,
- // as done in the [example above](#recording-an-exception).
-
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-)
-
-// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans.
-const (
- // Type of the trigger which caused this function execution.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Note: For the server/consumer span on the incoming side,
- // `faas.trigger` MUST be set.
-
- // Clients invoking FaaS instances usually cannot set `faas.trigger`,
- // since they would typically need to look in the payload to determine
- // the event type. If clients set it, it should be the same as the
- // trigger that corresponding incoming would have (i.e., this has
- // nothing to do with the underlying transport used to make the API
- // call to invoke the lambda, which is often HTTP).
- FaaSTriggerKey = attribute.Key("faas.trigger")
- // The execution ID of the current function execution.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSExecutionKey = attribute.Key("faas.execution")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write.
-const (
- // The name of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos
- // DB to the database name.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
- // Describes the type of the operation that was performed on the data.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
- // A string containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
- // in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
- // The document name/table subjected to the operation. For example, in Cloud
- // Storage or S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// Semantic Convention for FaaS scheduled to be executed regularly.
-const (
- // A string containing the function invocation time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
- // in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
- // A string containing the schedule period as [Cron Expression](https://docs.oracl
- // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-)
-
-// Contains additional attributes for incoming FaaS spans.
-const (
- // A boolean that is true if the serverless function is executed for the first
- // time (aka cold-start).
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-)
-
-// Contains additional attributes for outgoing FaaS spans.
-const (
- // The name of the invoked function.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
- // function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
- // The cloud provider of the invoked function.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked
- // function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
- // The cloud region of the invoked function.
- //
- // Type: string
- // Required: For some cloud providers, like AWS or GCP, the region in which a
- // function is hosted is essential to uniquely identify the function and also part
- // of its endpoint. Since it's part of the endpoint being called, the region is
- // always known to clients. In these cases, `faas.invoked_region` MUST be set
- // accordingly. If the region is unknown to the client or not required for
- // identifying the invoked function, setting `faas.invoked_region` is optional.
- // Stability: stable
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
- // function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-// These attributes may be used for any network related operation.
-const (
- // Transport protocol used. See note below.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- NetTransportKey = attribute.Key("net.transport")
- // Remote address of the peer (dotted decimal for IPv4 or
- // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6)
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '127.0.0.1'
- NetPeerIPKey = attribute.Key("net.peer.ip")
- // Remote port number.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 80, 8080, 443
- NetPeerPortKey = attribute.Key("net.peer.port")
- // Remote hostname or similar, see note below.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'example.com'
- // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra
- // DNS lookup.
- NetPeerNameKey = attribute.Key("net.peer.name")
- // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '192.168.0.1'
- NetHostIPKey = attribute.Key("net.host.ip")
- // Like `net.peer.port` but for the host port.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 35555
- NetHostPortKey = attribute.Key("net.host.port")
- // Local hostname or similar, see note below.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'localhost'
- NetHostNameKey = attribute.Key("net.host.name")
- // The internet connection type currently being used by the host.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Examples: 'wifi'
- NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
- // This describes more details regarding the connection.type. It may be the type
- // of cell technology connection, but it could be used for describing details
- // about a wifi connection.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Examples: 'LTE'
- NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
- // The name of the mobile carrier.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'sprint'
- NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
- // The mobile carrier country code.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '310'
- NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
- // The mobile carrier network code.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '001'
- NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
- // The ISO 3166-1 alpha-2 2-character country code associated with the mobile
- // carrier network.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'DE'
- NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
-)
-
-var (
- // ip_tcp
- NetTransportTCP = NetTransportKey.String("ip_tcp")
- // ip_udp
- NetTransportUDP = NetTransportKey.String("ip_udp")
- // Another IP-based protocol
- NetTransportIP = NetTransportKey.String("ip")
- // Unix Domain socket. See below
- NetTransportUnix = NetTransportKey.String("unix")
- // Named or anonymous pipe. See note below
- NetTransportPipe = NetTransportKey.String("pipe")
- // In-process communication
- NetTransportInProc = NetTransportKey.String("inproc")
- // Something else (non IP-based)
- NetTransportOther = NetTransportKey.String("other")
-)
-
-var (
- // wifi
- NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
- // wired
- NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
- // cell
- NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
- // unavailable
- NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
- // unknown
- NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
-)
-
-var (
- // GPRS
- NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
- // EDGE
- NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
- // UMTS
- NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
- // CDMA
- NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
- // IDEN
- NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
- // EHRPD
- NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
- // GSM
- NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
-)
-
-// Operations that access some remote service.
-const (
- // The [`service.name`](../../resource/semantic_conventions/README.md#service) of
- // the remote service. SHOULD be equal to the actual `service.name` resource
- // attribute of the remote service if any.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// These attributes may be used for any operation with an authenticated and/or authorized enduser.
-const (
- // Username or client_id extracted from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the
- // inbound request from outside the system.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
- // Actual/assumed role the client is making the request under extracted from token
- // or application security context.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
- // Scopes or granted authorities the client currently possesses extracted from
- // token or application security context. The value would come from the scope
- // associated with an [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value
- // in a [SAML 2.0 Assertion](http://docs.oasis-
- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// These attributes may be used for any operation to store information about a thread that started a span.
-const (
- // Current "managed" thread ID (as opposed to OS thread ID).
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
- // Current thread name.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// These attributes allow to report this unit of code and therefore to provide more context about the span.
-const (
- // The method or function name, or equivalent (usually rightmost part of the code
- // unit's name).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
- // The "namespace" within which `code.function` is defined. Usually the qualified
- // class or module name, such that `code.namespace` + some separator +
- // `code.function` form a unique identifier for the code unit.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
- // The source code file name that identifies the code unit as uniquely as possible
- // (preferably an absolute file path).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
- // The line number in `code.filepath` best representing the operation. It SHOULD
- // point within the code unit named in `code.function`.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-)
-
-// This document defines semantic conventions for HTTP client and server Spans.
-const (
- // HTTP request method.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- HTTPMethodKey = attribute.Key("http.method")
- // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`.
- // Usually the fragment is not transmitted over HTTP, but if it is known, it
- // should be included nevertheless.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
- // Note: `http.url` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case the attribute's
- // value should be `https://www.example.com/`.
- HTTPURLKey = attribute.Key("http.url")
- // The full request target as passed in a HTTP request line or equivalent.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '/path/12314/?q=ddds#123'
- HTTPTargetKey = attribute.Key("http.target")
- // The value of the [HTTP host
- // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header
- // should also be reported, see note.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'www.example.org'
- // Note: When the header is present but empty the attribute SHOULD be set to the
- // empty string. Note that this is a valid situation that is expected in certain
- // cases, according the aforementioned [section of RFC
- // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not
- // set the attribute MUST NOT be set.
- HTTPHostKey = attribute.Key("http.host")
- // The URI scheme identifying the used protocol.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'http', 'https'
- HTTPSchemeKey = attribute.Key("http.scheme")
- // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // Required: If and only if one was received/sent.
- // Stability: stable
- // Examples: 200
- HTTPStatusCodeKey = attribute.Key("http.status_code")
- // Kind of HTTP protocol used.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP`
- // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed.
- HTTPFlavorKey = attribute.Key("http.flavor")
- // Value of the [HTTP User-
- // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the
- // client.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
- HTTPUserAgentKey = attribute.Key("http.user_agent")
- // The size of the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as the
- // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
- // requests using transport encoding, this should be the compressed size.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 3495
- HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
- // The size of the uncompressed request payload body after transport decoding. Not
- // set if transport encoding not used.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 5493
- HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed")
- // The size of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as the
- // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
- // requests using transport encoding, this should be the compressed size.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 3495
- HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
- // The size of the uncompressed response payload body after transport decoding.
- // Not set if transport encoding not used.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 5493
- HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed")
- // The ordinal number of request re-sending attempt.
- //
- // Type: int
- // Required: If and only if a request was retried.
- // Stability: stable
- // Examples: 3
- HTTPRetryCountKey = attribute.Key("http.retry_count")
-)
-
-var (
- // HTTP/1.0
- HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
- // HTTP/1.1
- HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
- // HTTP/2
- HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
- // HTTP/3
- HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
- // SPDY protocol
- HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
- // QUIC protocol
- HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
-)
-
-// Semantic Convention for HTTP Server
-const (
- // The primary server name of the matched virtual host. This should be obtained
- // via configuration. If no such configuration can be obtained, this attribute
- // MUST NOT be set ( `net.host.name` should be used instead).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'example.com'
- // Note: `http.url` is usually not readily available on the server side but would
- // have to be assembled in a cumbersome and sometimes lossy process from other
- // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus
- // preferred to supply the raw data that is available.
- HTTPServerNameKey = attribute.Key("http.server_name")
- // The matched route (path template).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '/users/:userID?'
- HTTPRouteKey = attribute.Key("http.route")
- // The IP address of the original client behind all proxies, if known (e.g. from
- // [X-Forwarded-For](https://developer.mozilla.org/en-
- // US/docs/Web/HTTP/Headers/X-Forwarded-For)).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '83.164.160.102'
- // Note: This is not necessarily the same as `net.peer.ip`, which would
- // identify the network-level peer, which may be a proxy.
-
- // This attribute should be set when a source of information different
- // from the one used for `net.peer.ip`, is available even if that other
- // source just confirms the same value as `net.peer.ip`.
- // Rationale: For `net.peer.ip`, one typically does not know if it
- // comes from a proxy, reverse proxy, or the actual client. Setting
- // `http.client_ip` when it's the same as `net.peer.ip` means that
- // one is at least somewhat confident that the address is not that of
- // the closest proxy.
- HTTPClientIPKey = attribute.Key("http.client_ip")
-)
-
-// Attributes that exist for multiple DynamoDB request types.
-const (
- // The keys in the `RequestItems` object field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
- // The JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : {
- // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
- // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
- // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
- // "string", "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
- // The JSON-serialized value of the `ItemCollectionMetrics` response field.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
- // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
- // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
- // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
- // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
- //
- // Type: double
- // Required: No
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
- // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // Required: No
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
- // The value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
- // The value of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems,
- // ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
- // The value of the `Limit` request parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
- // The value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
- // The value of the `IndexName` request parameter.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
- // The value of the `Select` request parameter.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-)
-
-// DynamoDB.CreateTable
-const (
- // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request
- // field
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string",
- // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
- // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits":
- // number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
- // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request
- // field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes":
- // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
- // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
- // "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// DynamoDB.ListTables
-const (
- // The value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
- // The number of items in the `TableNames` response parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// DynamoDB.Query
-const (
- // The value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-)
-
-// DynamoDB.Scan
-const (
- // The value of the `Segment` request parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
- // The value of the `TotalSegments` request parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
- // The value of the `Count` response parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
- // The value of the `ScannedCount` response parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-)
-
-// DynamoDB.UpdateTable
-const (
- // The JSON-serialized value of each item in the `AttributeDefinitions` request
- // field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
- // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates`
- // request field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
- // number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// This document defines the attributes used in messaging systems.
-const (
- // A string identifying the messaging system.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
- MessagingSystemKey = attribute.Key("messaging.system")
- // The message destination name. This might be equal to the span name but is
- // required nevertheless.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- MessagingDestinationKey = attribute.Key("messaging.destination")
- // The kind of message destination
- //
- // Type: Enum
- // Required: Required only if the message destination is either a `queue` or
- // `topic`.
- // Stability: stable
- MessagingDestinationKindKey = attribute.Key("messaging.destination_kind")
- // A boolean that is true if the message destination is temporary.
- //
- // Type: boolean
- // Required: If missing, it is assumed to be false.
- // Stability: stable
- MessagingTempDestinationKey = attribute.Key("messaging.temp_destination")
- // The name of the transport protocol.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'AMQP', 'MQTT'
- MessagingProtocolKey = attribute.Key("messaging.protocol")
- // The version of the transport protocol.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '0.9.1'
- MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version")
- // Connection string.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'tibjmsnaming://localhost:7222',
- // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue'
- MessagingURLKey = attribute.Key("messaging.url")
- // A value used by the messaging system as an identifier for the message,
- // represented as a string.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message_id")
- // The [conversation ID](#conversations) identifying the conversation to which the
- // message belongs, represented as a string. Sometimes called "Correlation ID".
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'MyConversationID'
- MessagingConversationIDKey = attribute.Key("messaging.conversation_id")
- // The (uncompressed) size of the message payload in bytes. Also use this
- // attribute if it is unknown whether the compressed or uncompressed payload size
- // is reported.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 2738
- MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes")
- // The compressed size of the message payload in bytes.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 2048
- MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes")
-)
-
-var (
- // A message sent to a queue
- MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
- // A message sent to a topic
- MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
-)
-
-// Semantic convention for a consumer of messages received from a messaging system
-const (
- // A string identifying the kind of message consumption as defined in the
- // [Operation names](#operation-names) section above. If the operation is "send",
- // this attribute MUST NOT be set, since the operation can be inferred from the
- // span kind in that case.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- MessagingOperationKey = attribute.Key("messaging.operation")
- // The identifier for the consumer receiving a message. For Kafka, set it to
- // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are
- // present, or only `messaging.kafka.consumer_group`. For brokers, such as
- // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
- // message.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'mygroup - client-6'
- MessagingConsumerIDKey = attribute.Key("messaging.consumer_id")
-)
-
-var (
- // receive
- MessagingOperationReceive = MessagingOperationKey.String("receive")
- // process
- MessagingOperationProcess = MessagingOperationKey.String("process")
-)
-
-// Attributes for RabbitMQ
-const (
- // RabbitMQ message routing key.
- //
- // Type: string
- // Required: Unless it is empty.
- // Stability: stable
- // Examples: 'myKey'
- MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key")
-)
-
-// Attributes for Apache Kafka
-const (
- // Message keys in Kafka are used for grouping alike messages to ensure they're
- // processed on the same partition. They differ from `messaging.message_id` in
- // that they're not unique. If the key is `null`, the attribute MUST NOT be set.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'myKey'
- // Note: If the key type is not string, it's string representation has to be
- // supplied for the attribute. If the key has no unambiguous, canonical string
- // form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key")
- // Name of the Kafka Consumer Group that is handling the message. Only applies to
- // consumers, not producers.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group")
- // Client ID for the Consumer or Producer that is handling the message.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'client-5'
- MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
- // Partition the message is sent to.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 2
- MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition")
- // A boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // Required: If missing, it is assumed to be false.
- // Stability: stable
- MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone")
-)
-
-// Attributes for Apache RocketMQ
-const (
- // Namespace of RocketMQ resources, resources in different namespaces are
- // individual.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
- // Name of the RocketMQ producer/consumer group that is handling the message. The
- // client type is identified by the SpanKind.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
- // The unique identifier for each client.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'myhost@8742@s8083jm'
- MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
- // Type of message.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type")
- // The secondary classifier of message besides topic.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag")
- // Key(s) of message, another way to mark message besides message id.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys")
- // Model of message consumption. This only applies to consumer spans.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-// This document defines semantic conventions for remote procedure calls.
-const (
- // A string identifying the remoting system. See below for a list of well-known
- // identifiers.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- RPCSystemKey = attribute.Key("rpc.system")
- // The full (logical) name of the service being called, including its package
- // name, if applicable.
- //
- // Type: string
- // Required: No, but recommended
- // Stability: stable
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing class.
- // The `code.namespace` attribute may be used to store the latter (despite the
- // attribute name, it may include a class name; e.g., class with method actually
- // executing the call on the server side, RPC client stub class on the client
- // side).
- RPCServiceKey = attribute.Key("rpc.service")
- // The name of the (logical) method being called, must be equal to the $method
- // part in the span name.
- //
- // Type: string
- // Required: No, but recommended
- // Stability: stable
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the latter
- // (e.g., method actually executing the call on the server side, RPC client stub
- // method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
-)
-
-// Tech-specific attributes for gRPC.
-const (
- // The [numeric status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC
- // request.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
-const (
- // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC
- // 1.0 does not specify this, the value can be omitted.
- //
- // Type: string
- // Required: If missing, it is assumed to be "1.0".
- // Stability: stable
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
- // `id` property of request or response. Since protocol allows id to be int,
- // string, `null` or missing (for notifications), value is expected to be cast to
- // string for simplicity. Use empty string in case of `null` value. Omit entirely
- // if this is a notification.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // Required: If missing, response is assumed to be successful.
- // Stability: stable
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-)
-
-// RPC received/sent message.
-const (
- // Whether this is a received or sent message.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- MessageTypeKey = attribute.Key("message.type")
- // MUST be calculated as two different counters starting from `1` one for sent
- // messages and one for received message.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- MessageIDKey = attribute.Key("message.id")
- // Compressed size of the message in bytes.
- //
- // Type: int
- // Required: No
- // Stability: stable
- MessageCompressedSizeKey = attribute.Key("message.compressed_size")
- // Uncompressed size of the message in bytes.
- //
- // Type: int
- // Required: No
- // Stability: stable
- MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
-)
-
-var (
- // sent
- MessageTypeSent = MessageTypeKey.String("SENT")
- // received
- MessageTypeReceived = MessageTypeKey.String("RECEIVED")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
deleted file mode 100644
index 82e1f46b4..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.20.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
deleted file mode 100644
index 6685c392b..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
+++ /dev/null
@@ -1,1198 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Describes HTTP attributes.
-const (
- // HTTPMethodKey is the attribute Key conforming to the "http.method"
- // semantic conventions. It represents the hTTP request method.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- HTTPMethodKey = attribute.Key("http.method")
-
- // HTTPStatusCodeKey is the attribute Key conforming to the
- // "http.status_code" semantic conventions. It represents the [HTTP
- // response status code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If and only if one was
- // received/sent.)
- // Stability: stable
- // Examples: 200
- HTTPStatusCodeKey = attribute.Key("http.status_code")
-)
-
-// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
-// semantic conventions. It represents the hTTP request method.
-func HTTPMethod(val string) attribute.KeyValue {
- return HTTPMethodKey.String(val)
-}
-
-// HTTPStatusCode returns an attribute KeyValue conforming to the
-// "http.status_code" semantic conventions. It represents the [HTTP response
-// status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPStatusCode(val int) attribute.KeyValue {
- return HTTPStatusCodeKey.Int(val)
-}
-
-// HTTP Server spans attributes
-const (
- // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
- // semantic conventions. It represents the URI scheme identifying the used
- // protocol.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'http', 'https'
- HTTPSchemeKey = attribute.Key("http.scheme")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route (path template in
- // the format used by the respective server framework). See note below
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If and only if it's available)
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework as the route attribute should have low-cardinality and
- // the URI path can NOT substitute it.
- // SHOULD include the [application
- // root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
- // if there is one.
- HTTPRouteKey = attribute.Key("http.route")
-)
-
-// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
-// semantic conventions. It represents the URI scheme identifying the used
-// protocol.
-func HTTPScheme(val string) attribute.KeyValue {
- return HTTPSchemeKey.String(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route (path template in the
-// format used by the respective server framework). See note below
-func HTTPRoute(val string) attribute.KeyValue {
- return HTTPRouteKey.String(val)
-}
-
-// Attributes for Events represented using Log Records.
-const (
- // EventNameKey is the attribute Key conforming to the "event.name"
- // semantic conventions. It represents the name identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'click', 'exception'
- EventNameKey = attribute.Key("event.name")
-
- // EventDomainKey is the attribute Key conforming to the "event.domain"
- // semantic conventions. It represents the domain identifies the business
- // context for the events.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: Events across different domains may have same `event.name`, yet be
- // unrelated events.
- EventDomainKey = attribute.Key("event.domain")
-)
-
-var (
- // Events from browser apps
- EventDomainBrowser = EventDomainKey.String("browser")
- // Events from mobile apps
- EventDomainDevice = EventDomainKey.String("device")
- // Events from Kubernetes
- EventDomainK8S = EventDomainKey.String("k8s")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It represents the name identifies the event.
-func EventName(val string) attribute.KeyValue {
- return EventNameKey.String(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetTransportKey is the attribute Key conforming to the "net.transport"
- // semantic conventions. It represents the transport protocol used. See
- // note below.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- NetTransportKey = attribute.Key("net.transport")
-
- // NetProtocolNameKey is the attribute Key conforming to the
- // "net.protocol.name" semantic conventions. It represents the application
- // layer protocol used. The value SHOULD be normalized to lowercase.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'amqp', 'http', 'mqtt'
- NetProtocolNameKey = attribute.Key("net.protocol.name")
-
- // NetProtocolVersionKey is the attribute Key conforming to the
- // "net.protocol.version" semantic conventions. It represents the version
- // of the application layer protocol used. See note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '3.1.1'
- // Note: `net.protocol.version` refers to the version of the protocol used
- // and might be different from the protocol client's version. If the HTTP
- // client used has a version of `0.27.2`, but sends HTTP version `1.1`,
- // this attribute should be set to `1.1`.
- NetProtocolVersionKey = attribute.Key("net.protocol.version")
-
- // NetSockPeerNameKey is the attribute Key conforming to the
- // "net.sock.peer.name" semantic conventions. It represents the remote
- // socket peer name.
- //
- // Type: string
- // RequirementLevel: Recommended (If available and different from
- // `net.peer.name` and if `net.sock.peer.addr` is set.)
- // Stability: stable
- // Examples: 'proxy.example.com'
- NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
-
- // NetSockPeerAddrKey is the attribute Key conforming to the
- // "net.sock.peer.addr" semantic conventions. It represents the remote
- // socket peer address: IPv4 or IPv6 for internet protocols, path for local
- // communication,
- // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '127.0.0.1', '/tmp/mysql.sock'
- NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
-
- // NetSockPeerPortKey is the attribute Key conforming to the
- // "net.sock.peer.port" semantic conventions. It represents the remote
- // socket peer port.
- //
- // Type: int
- // RequirementLevel: Recommended (If defined for the address family and if
- // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
- // Stability: stable
- // Examples: 16456
- NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
-
- // NetSockFamilyKey is the attribute Key conforming to the
- // "net.sock.family" semantic conventions. It represents the protocol
- // [address
- // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
- // which is used for communication.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (If different than `inet` and if
- // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
- // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
- // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
- // instrumentations that follow previous versions of this document.)
- // Stability: stable
- // Examples: 'inet6', 'bluetooth'
- NetSockFamilyKey = attribute.Key("net.sock.family")
-
- // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
- // semantic conventions. It represents the logical remote hostname, see
- // note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'example.com'
- // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
- // extra DNS lookup.
- NetPeerNameKey = attribute.Key("net.peer.name")
-
- // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
- // semantic conventions. It represents the logical remote port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 80, 8080, 443
- NetPeerPortKey = attribute.Key("net.peer.port")
-
- // NetHostNameKey is the attribute Key conforming to the "net.host.name"
- // semantic conventions. It represents the logical local hostname or
- // similar, see note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'localhost'
- NetHostNameKey = attribute.Key("net.host.name")
-
- // NetHostPortKey is the attribute Key conforming to the "net.host.port"
- // semantic conventions. It represents the logical local port number,
- // preferably the one that the peer used to connect
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 8080
- NetHostPortKey = attribute.Key("net.host.port")
-
- // NetSockHostAddrKey is the attribute Key conforming to the
- // "net.sock.host.addr" semantic conventions. It represents the local
- // socket address. Useful in case of a multi-IP host.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '192.168.0.1'
- NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
-
- // NetSockHostPortKey is the attribute Key conforming to the
- // "net.sock.host.port" semantic conventions. It represents the local
- // socket port number.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If defined for the address
- // family and if different than `net.host.port` and if `net.sock.host.addr`
- // is set. In other cases, it is still recommended to set this.)
- // Stability: stable
- // Examples: 35555
- NetSockHostPortKey = attribute.Key("net.sock.host.port")
-)
-
-var (
- // ip_tcp
- NetTransportTCP = NetTransportKey.String("ip_tcp")
- // ip_udp
- NetTransportUDP = NetTransportKey.String("ip_udp")
- // Named or anonymous pipe. See note below
- NetTransportPipe = NetTransportKey.String("pipe")
- // In-process communication
- NetTransportInProc = NetTransportKey.String("inproc")
- // Something else (non IP-based)
- NetTransportOther = NetTransportKey.String("other")
-)
-
-var (
- // IPv4 address
- NetSockFamilyInet = NetSockFamilyKey.String("inet")
- // IPv6 address
- NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
- // Unix domain socket path
- NetSockFamilyUnix = NetSockFamilyKey.String("unix")
-)
-
-// NetProtocolName returns an attribute KeyValue conforming to the
-// "net.protocol.name" semantic conventions. It represents the application
-// layer protocol used. The value SHOULD be normalized to lowercase.
-func NetProtocolName(val string) attribute.KeyValue {
- return NetProtocolNameKey.String(val)
-}
-
-// NetProtocolVersion returns an attribute KeyValue conforming to the
-// "net.protocol.version" semantic conventions. It represents the version of
-// the application layer protocol used. See note below.
-func NetProtocolVersion(val string) attribute.KeyValue {
- return NetProtocolVersionKey.String(val)
-}
-
-// NetSockPeerName returns an attribute KeyValue conforming to the
-// "net.sock.peer.name" semantic conventions. It represents the remote socket
-// peer name.
-func NetSockPeerName(val string) attribute.KeyValue {
- return NetSockPeerNameKey.String(val)
-}
-
-// NetSockPeerAddr returns an attribute KeyValue conforming to the
-// "net.sock.peer.addr" semantic conventions. It represents the remote socket
-// peer address: IPv4 or IPv6 for internet protocols, path for local
-// communication,
-// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
-func NetSockPeerAddr(val string) attribute.KeyValue {
- return NetSockPeerAddrKey.String(val)
-}
-
-// NetSockPeerPort returns an attribute KeyValue conforming to the
-// "net.sock.peer.port" semantic conventions. It represents the remote socket
-// peer port.
-func NetSockPeerPort(val int) attribute.KeyValue {
- return NetSockPeerPortKey.Int(val)
-}
-
-// NetPeerName returns an attribute KeyValue conforming to the
-// "net.peer.name" semantic conventions. It represents the logical remote
-// hostname, see note below.
-func NetPeerName(val string) attribute.KeyValue {
- return NetPeerNameKey.String(val)
-}
-
-// NetPeerPort returns an attribute KeyValue conforming to the
-// "net.peer.port" semantic conventions. It represents the logical remote port
-// number
-func NetPeerPort(val int) attribute.KeyValue {
- return NetPeerPortKey.Int(val)
-}
-
-// NetHostName returns an attribute KeyValue conforming to the
-// "net.host.name" semantic conventions. It represents the logical local
-// hostname or similar, see note below.
-func NetHostName(val string) attribute.KeyValue {
- return NetHostNameKey.String(val)
-}
-
-// NetHostPort returns an attribute KeyValue conforming to the
-// "net.host.port" semantic conventions. It represents the logical local port
-// number, preferably the one that the peer used to connect
-func NetHostPort(val int) attribute.KeyValue {
- return NetHostPortKey.Int(val)
-}
-
-// NetSockHostAddr returns an attribute KeyValue conforming to the
-// "net.sock.host.addr" semantic conventions. It represents the local socket
-// address. Useful in case of a multi-IP host.
-func NetSockHostAddr(val string) attribute.KeyValue {
- return NetSockHostAddrKey.String(val)
-}
-
-// NetSockHostPort returns an attribute KeyValue conforming to the
-// "net.sock.host.port" semantic conventions. It represents the local socket
-// port number.
-func NetSockHostPort(val int) attribute.KeyValue {
- return NetSockHostPortKey.Int(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetHostConnectionTypeKey is the attribute Key conforming to the
- // "net.host.connection.type" semantic conventions. It represents the
- // internet connection type currently being used by the host.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'wifi'
- NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
-
- // NetHostConnectionSubtypeKey is the attribute Key conforming to the
- // "net.host.connection.subtype" semantic conventions. It represents the
- // this describes more details regarding the connection.type. It may be the
- // type of cell technology connection, but it could be used for describing
- // details about a wifi connection.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'LTE'
- NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
-
- // NetHostCarrierNameKey is the attribute Key conforming to the
- // "net.host.carrier.name" semantic conventions. It represents the name of
- // the mobile carrier.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'sprint'
- NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
-
- // NetHostCarrierMccKey is the attribute Key conforming to the
- // "net.host.carrier.mcc" semantic conventions. It represents the mobile
- // carrier country code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '310'
- NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
-
- // NetHostCarrierMncKey is the attribute Key conforming to the
- // "net.host.carrier.mnc" semantic conventions. It represents the mobile
- // carrier network code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '001'
- NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
-
- // NetHostCarrierIccKey is the attribute Key conforming to the
- // "net.host.carrier.icc" semantic conventions. It represents the ISO
- // 3166-1 alpha-2 2-character country code associated with the mobile
- // carrier network.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'DE'
- NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
-)
-
-var (
- // wifi
- NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
- // wired
- NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
- // cell
- NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
- // unavailable
- NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
- // unknown
- NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
-)
-
-var (
- // GPRS
- NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
- // EDGE
- NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
- // UMTS
- NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
- // CDMA
- NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
- // IDEN
- NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
- // EHRPD
- NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
- // GSM
- NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
-)
-
-// NetHostCarrierName returns an attribute KeyValue conforming to the
-// "net.host.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetHostCarrierName(val string) attribute.KeyValue {
- return NetHostCarrierNameKey.String(val)
-}
-
-// NetHostCarrierMcc returns an attribute KeyValue conforming to the
-// "net.host.carrier.mcc" semantic conventions. It represents the mobile
-// carrier country code.
-func NetHostCarrierMcc(val string) attribute.KeyValue {
- return NetHostCarrierMccKey.String(val)
-}
-
-// NetHostCarrierMnc returns an attribute KeyValue conforming to the
-// "net.host.carrier.mnc" semantic conventions. It represents the mobile
-// carrier network code.
-func NetHostCarrierMnc(val string) attribute.KeyValue {
- return NetHostCarrierMncKey.String(val)
-}
-
-// NetHostCarrierIcc returns an attribute KeyValue conforming to the
-// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetHostCarrierIcc(val string) attribute.KeyValue {
- return NetHostCarrierIccKey.String(val)
-}
-
-// Semantic conventions for HTTP client and server Spans.
-const (
- // HTTPRequestContentLengthKey is the attribute Key conforming to the
- // "http.request_content_length" semantic conventions. It represents the
- // size of the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3495
- HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
-
- // HTTPResponseContentLengthKey is the attribute Key conforming to the
- // "http.response_content_length" semantic conventions. It represents the
- // size of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3495
- HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
-)
-
-// HTTPRequestContentLength returns an attribute KeyValue conforming to the
-// "http.request_content_length" semantic conventions. It represents the size
-// of the request payload body in bytes. This is the number of bytes
-// transferred excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestContentLength(val int) attribute.KeyValue {
- return HTTPRequestContentLengthKey.Int(val)
-}
-
-// HTTPResponseContentLength returns an attribute KeyValue conforming to the
-// "http.response_content_length" semantic conventions. It represents the size
-// of the response payload body in bytes. This is the number of bytes
-// transferred excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseContentLength(val int) attribute.KeyValue {
- return HTTPResponseContentLengthKey.Int(val)
-}
-
-// Semantic convention describing per-message attributes populated on messaging
-// spans or links.
-const (
- // MessagingMessageIDKey is the attribute Key conforming to the
- // "messaging.message.id" semantic conventions. It represents a value used
- // by the messaging system as an identifier for the message, represented as
- // a string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
- // MessagingMessageConversationIDKey is the attribute Key conforming to the
- // "messaging.message.conversation_id" semantic conventions. It represents
- // the [conversation ID](#conversations) identifying the conversation to
- // which the message belongs, represented as a string. Sometimes called
- // "Correlation ID".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyConversationID'
- MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
- // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
- // the "messaging.message.payload_size_bytes" semantic conventions. It
- // represents the (uncompressed) size of the message payload in bytes. Also
- // use this attribute if it is unknown whether the compressed or
- // uncompressed payload size is reported.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2738
- MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
-
- // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
- // conforming to the "messaging.message.payload_compressed_size_bytes"
- // semantic conventions. It represents the compressed size of the message
- // payload in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2048
- MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
-)
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
- return MessagingMessageIDKey.String(val)
-}
-
-// MessagingMessageConversationID returns an attribute KeyValue conforming
-// to the "messaging.message.conversation_id" semantic conventions. It
-// represents the [conversation ID](#conversations) identifying the
-// conversation to which the message belongs, represented as a string.
-// Sometimes called "Correlation ID".
-func MessagingMessageConversationID(val string) attribute.KeyValue {
- return MessagingMessageConversationIDKey.String(val)
-}
-
-// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
-// to the "messaging.message.payload_size_bytes" semantic conventions. It
-// represents the (uncompressed) size of the message payload in bytes. Also use
-// this attribute if it is unknown whether the compressed or uncompressed
-// payload size is reported.
-func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
- return MessagingMessagePayloadSizeBytesKey.Int(val)
-}
-
-// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
-// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
-// conventions. It represents the compressed size of the message payload in
-// bytes.
-func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
- return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
-}
-
-// Semantic convention for attributes that describe messaging destination on
-// broker
-const (
- // MessagingDestinationNameKey is the attribute Key conforming to the
- // "messaging.destination.name" semantic conventions. It represents the
- // message destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Destination name SHOULD uniquely identify a specific queue, topic
- // or other entity within the broker. If
- // the broker does not have such notion, the destination name SHOULD
- // uniquely identify the broker.
- MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
- // MessagingDestinationTemplateKey is the attribute Key conforming to the
- // "messaging.destination.template" semantic conventions. It represents the
- // low cardinality representation of the messaging destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/customers/{customerID}'
- // Note: Destination names could be constructed from templates. An example
- // would be a destination name involving a user name or product id.
- // Although the destination name in this case is of high cardinality, the
- // underlying template is of low cardinality and can be effectively used
- // for grouping and aggregation.
- MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
- // MessagingDestinationTemporaryKey is the attribute Key conforming to the
- // "messaging.destination.temporary" semantic conventions. It represents a
- // boolean that is true if the message destination is temporary and might
- // not exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. It represents a
- // boolean that is true if the message destination is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-)
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name
-func MessagingDestinationName(val string) attribute.KeyValue {
- return MessagingDestinationNameKey.String(val)
-}
-
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
- return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
- return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// Semantic convention for attributes that describe messaging source on broker
-const (
- // MessagingSourceNameKey is the attribute Key conforming to the
- // "messaging.source.name" semantic conventions. It represents the message
- // source name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Source name SHOULD uniquely identify a specific queue, topic, or
- // other entity within the broker. If
- // the broker does not have such notion, the source name SHOULD uniquely
- // identify the broker.
- MessagingSourceNameKey = attribute.Key("messaging.source.name")
-
- // MessagingSourceTemplateKey is the attribute Key conforming to the
- // "messaging.source.template" semantic conventions. It represents the low
- // cardinality representation of the messaging source name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/customers/{customerID}'
- // Note: Source names could be constructed from templates. An example would
- // be a source name involving a user name or product id. Although the
- // source name in this case is of high cardinality, the underlying template
- // is of low cardinality and can be effectively used for grouping and
- // aggregation.
- MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
-
- // MessagingSourceTemporaryKey is the attribute Key conforming to the
- // "messaging.source.temporary" semantic conventions. It represents a
- // boolean that is true if the message source is temporary and might not
- // exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
-
- // MessagingSourceAnonymousKey is the attribute Key conforming to the
- // "messaging.source.anonymous" semantic conventions. It represents a
- // boolean that is true if the message source is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
-)
-
-// MessagingSourceName returns an attribute KeyValue conforming to the
-// "messaging.source.name" semantic conventions. It represents the message
-// source name
-func MessagingSourceName(val string) attribute.KeyValue {
- return MessagingSourceNameKey.String(val)
-}
-
-// MessagingSourceTemplate returns an attribute KeyValue conforming to the
-// "messaging.source.template" semantic conventions. It represents the low
-// cardinality representation of the messaging source name
-func MessagingSourceTemplate(val string) attribute.KeyValue {
- return MessagingSourceTemplateKey.String(val)
-}
-
-// MessagingSourceTemporary returns an attribute KeyValue conforming to the
-// "messaging.source.temporary" semantic conventions. It represents a boolean
-// that is true if the message source is temporary and might not exist anymore
-// after messages are processed.
-func MessagingSourceTemporary(val bool) attribute.KeyValue {
- return MessagingSourceTemporaryKey.Bool(val)
-}
-
-// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
-// "messaging.source.anonymous" semantic conventions. It represents a boolean
-// that is true if the message source is anonymous (could be unnamed or have
-// auto-generated name).
-func MessagingSourceAnonymous(val bool) attribute.KeyValue {
- return MessagingSourceAnonymousKey.Bool(val)
-}
-
-// Attributes for RabbitMQ
-const (
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
- // conventions. It represents the rabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If not empty.)
- // Stability: stable
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-)
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the rabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
- return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// Attributes for Apache Kafka
-const (
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
- // "messaging.kafka.message.key" semantic conventions. It represents the
- // message keys in Kafka are used for grouping alike messages to ensure
- // they're processed on the same partition. They differ from
- // `messaging.message.id` in that they're not unique. If the key is `null`,
- // the attribute MUST NOT be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'myKey'
- // Note: If the key type is not string, it's string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaClientIDKey is the attribute Key conforming to the
- // "messaging.kafka.client_id" semantic conventions. It represents the
- // client ID for the Consumer or Producer that is handling the message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'client-5'
- MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
-
- // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
- // the "messaging.kafka.destination.partition" semantic conventions. It
- // represents the partition the message is sent to.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2
- MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
-
- // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
- // "messaging.kafka.source.partition" semantic conventions. It represents
- // the partition the message is received from.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2
- MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: ConditionallyRequired (If value is `true`. When
- // missing, the value is assumed to be `false`.)
- // Stability: stable
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-)
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message keys in Kafka are used for grouping alike messages to ensure they're
-// processed on the same partition. They differ from `messaging.message.id` in
-// that they're not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaClientID returns an attribute KeyValue conforming to the
-// "messaging.kafka.client_id" semantic conventions. It represents the client
-// ID for the Consumer or Producer that is handling the message.
-func MessagingKafkaClientID(val string) attribute.KeyValue {
- return MessagingKafkaClientIDKey.String(val)
-}
-
-// MessagingKafkaDestinationPartition returns an attribute KeyValue
-// conforming to the "messaging.kafka.destination.partition" semantic
-// conventions. It represents the partition the message is sent to.
-func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
- return MessagingKafkaDestinationPartitionKey.Int(val)
-}
-
-// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
-// the "messaging.kafka.source.partition" semantic conventions. It represents
-// the partition the message is received from.
-func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
- return MessagingKafkaSourcePartitionKey.Int(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
- return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
- return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
-// Attributes for Apache RocketMQ
-const (
- // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
- // "messaging.rocketmq.namespace" semantic conventions. It represents the
- // namespace of RocketMQ resources, resources in different namespaces are
- // individual.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqClientIDKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_id" semantic conventions. It represents the
- // unique identifier for each client.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myhost@8742@s8083jm'
- MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
-
- // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delivery_timestamp"
- // semantic conventions. It represents the timestamp in milliseconds that
- // the delay message is expected to be delivered to consumer.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the message type is delay
- // and delay time level is not specified.)
- // Stability: stable
- // Examples: 1665987217045
- MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
- // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
- // conventions. It represents the delay time level for delay message, which
- // determines the message delay time.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the message type is delay
- // and delivery timestamp is not specified.)
- // Stability: stable
- // Examples: 3
- MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
- // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.group" semantic conventions. It represents
- // the it is essential for FIFO message. Messages that belong to the same
- // message group are always processed one by one within the same consumer
- // group.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
- // Stability: stable
- // Examples: 'myMessageGroup'
- MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
- // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.type" semantic conventions. It represents
- // the type of message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
- // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.tag" semantic conventions. It represents the
- // secondary classifier of message besides topic.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
- // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.keys" semantic conventions. It represents
- // the key(s) of message, another way to mark message besides message id.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.namespace" semantic conventions. It represents the
-// namespace of RocketMQ resources, resources in different namespaces are
-// individual.
-func MessagingRocketmqNamespace(val string) attribute.KeyValue {
- return MessagingRocketmqNamespaceKey.String(val)
-}
-
-// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.client_group" semantic conventions. It represents
-// the name of the RocketMQ producer/consumer group that is handling the
-// message. The client type is identified by the SpanKind.
-func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
- return MessagingRocketmqClientGroupKey.String(val)
-}
-
-// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
-// "messaging.rocketmq.client_id" semantic conventions. It represents the
-// unique identifier for each client.
-func MessagingRocketmqClientID(val string) attribute.KeyValue {
- return MessagingRocketmqClientIDKey.String(val)
-}
-
-// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
-// conventions. It represents the timestamp in milliseconds that the delay
-// message is expected to be delivered to consumer.
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
-}
-
-// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-// conventions. It represents the delay time level for delay message, which
-// determines the message delay time.
-func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
-}
-
-// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.group" semantic conventions. It represents
-// the it is essential for FIFO message. Messages that belong to the same
-// message group are always processed one by one within the same consumer
-// group.
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
- return MessagingRocketmqMessageGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
-// secondary classifier of message besides topic.
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
- return MessagingRocketmqMessageTagKey.String(val)
-}
-
-// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.keys" semantic conventions. It represents
-// the key(s) of message, another way to mark message besides message id.
-func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
- return MessagingRocketmqMessageKeysKey.StringSlice(val)
-}
-
-// Describes user-agent attributes.
-const (
- // UserAgentOriginalKey is the attribute Key conforming to the
- // "user_agent.original" semantic conventions. It represents the value of
- // the [HTTP
- // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
- // header sent by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
- UserAgentOriginalKey = attribute.Key("user_agent.original")
-)
-
-// UserAgentOriginal returns an attribute KeyValue conforming to the
-// "user_agent.original" semantic conventions. It represents the value of the
-// [HTTP
-// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-// header sent by the client.
-func UserAgentOriginal(val string) attribute.KeyValue {
- return UserAgentOriginalKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
deleted file mode 100644
index 0d1f55a8f..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconv implements OpenTelemetry semantic conventions.
-//
-// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the v1.20.0 version of the OpenTelemetry specification.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
deleted file mode 100644
index 637763932..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// This semantic convention defines the attributes used to represent a feature
-// flag evaluation as an event.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
- // "feature_flag.variant" semantic conventions. It represents the sHOULD be
- // a semantic identifier for a value. If one is unavailable, a stringified
- // version of the value can be used.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
- // For example, the variant `red` maybe be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
-// semantic identifier for a value. If one is unavailable, a stringified
-// version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
-// RPC received/sent message.
-const (
- // MessageTypeKey is the attribute Key conforming to the "message.type"
- // semantic conventions. It represents the whether this is a received or
- // sent message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessageTypeKey = attribute.Key("message.type")
-
- // MessageIDKey is the attribute Key conforming to the "message.id"
- // semantic conventions. It represents the mUST be calculated as two
- // different counters starting from `1` one for sent messages and one for
- // received message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- MessageIDKey = attribute.Key("message.id")
-
- // MessageCompressedSizeKey is the attribute Key conforming to the
- // "message.compressed_size" semantic conventions. It represents the
- // compressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- MessageCompressedSizeKey = attribute.Key("message.compressed_size")
-
- // MessageUncompressedSizeKey is the attribute Key conforming to the
- // "message.uncompressed_size" semantic conventions. It represents the
- // uncompressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
-)
-
-var (
- // sent
- MessageTypeSent = MessageTypeKey.String("SENT")
- // received
- MessageTypeReceived = MessageTypeKey.String("RECEIVED")
-)
-
-// MessageID returns an attribute KeyValue conforming to the "message.id"
-// semantic conventions. It represents the mUST be calculated as two different
-// counters starting from `1` one for sent messages and one for received
-// message.
-func MessageID(val int) attribute.KeyValue {
- return MessageIDKey.Int(val)
-}
-
-// MessageCompressedSize returns an attribute KeyValue conforming to the
-// "message.compressed_size" semantic conventions. It represents the compressed
-// size of the message in bytes.
-func MessageCompressedSize(val int) attribute.KeyValue {
- return MessageCompressedSizeKey.Int(val)
-}
-
-// MessageUncompressedSize returns an attribute KeyValue conforming to the
-// "message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func MessageUncompressedSize(val int) attribute.KeyValue {
- return MessageUncompressedSizeKey.Int(val)
-}
-
-// The attributes used to report a single exception associated with a span.
-const (
- // ExceptionEscapedKey is the attribute Key conforming to the
- // "exception.escaped" semantic conventions. It represents the sHOULD be
- // set to true if the exception event is recorded at a point where it is
- // known that the exception is escaping the scope of the span.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of
- // a span,
- // if that span is ended while the exception is still logically "in
- // flight".
- // This may be actually "in flight" in some languages (e.g. if the
- // exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most
- // languages.
- //
- // It is usually not possible to determine at the point where an exception
- // is thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending
- // the span,
- // as done in the [example above](#recording-an-exception).
- //
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It represents the sHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
deleted file mode 100644
index f40c97825..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-const (
- // ExceptionEventName is the name of the Span event representing an exception.
- ExceptionEventName = "exception"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
deleted file mode 100644
index 9c1840631..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-// HTTP scheme attributes.
-var (
- HTTPSchemeHTTP = HTTPSchemeKey.String("http")
- HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/README.md
deleted file mode 100644
index 96b4b0d0b..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.20.0 HTTP conv
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/http.go
deleted file mode 100644
index 8f261a9db..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/http.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package httpconv provides OpenTelemetry HTTP semantic conventions for
-// tracing telemetry.
-package httpconv // import "go.opentelemetry.io/otel/semconv/v1.20.0/httpconv"
-
-import (
- "net/http"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/semconv/internal/v4"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-var (
- nc = &internal.NetConv{
- NetHostNameKey: semconv.NetHostNameKey,
- NetHostPortKey: semconv.NetHostPortKey,
- NetPeerNameKey: semconv.NetPeerNameKey,
- NetPeerPortKey: semconv.NetPeerPortKey,
- NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
- NetSockPeerPortKey: semconv.NetSockPeerPortKey,
- NetTransportOther: semconv.NetTransportOther,
- NetTransportTCP: semconv.NetTransportTCP,
- NetTransportUDP: semconv.NetTransportUDP,
- NetTransportInProc: semconv.NetTransportInProc,
- }
-
- hc = &internal.HTTPConv{
- NetConv: nc,
-
- EnduserIDKey: semconv.EnduserIDKey,
- HTTPClientIPKey: semconv.HTTPClientIPKey,
- NetProtocolNameKey: semconv.NetProtocolNameKey,
- NetProtocolVersionKey: semconv.NetProtocolVersionKey,
- HTTPMethodKey: semconv.HTTPMethodKey,
- HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
- HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
- HTTPRouteKey: semconv.HTTPRouteKey,
- HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
- HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
- HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
- HTTPTargetKey: semconv.HTTPTargetKey,
- HTTPURLKey: semconv.HTTPURLKey,
- UserAgentOriginalKey: semconv.UserAgentOriginalKey,
- }
-)
-
-// ClientResponse returns trace attributes for an HTTP response received by a
-// client from a server. It will return the following attributes if the related
-// values are defined in resp: "http.status.code",
-// "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event,
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. If a complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-// append(ClientResponse(resp), ClientRequest(resp.Request)...)
-func ClientResponse(resp *http.Response) []attribute.KeyValue {
- return hc.ClientResponse(resp)
-}
-
-// ClientRequest returns trace attributes for an HTTP request made by a client.
-// The following attributes are always returned: "http.url",
-// "net.protocol.(name|version)", "http.method", "net.peer.name".
-// The following attributes are returned if the related values are defined
-// in req: "net.peer.port", "http.user_agent", "http.request_content_length",
-// "enduser.id".
-func ClientRequest(req *http.Request) []attribute.KeyValue {
- return hc.ClientRequest(req)
-}
-
-// ClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func ClientStatus(code int) (codes.Code, string) {
- return hc.ClientStatus(code)
-}
-
-// ServerRequest returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// ""net.protocol.(name|version)", "http.target", "net.host.name".
-// The following attributes are returned if they related values are defined
-// in req: "net.host.port", "net.sock.peer.addr", "net.sock.peer.port",
-// "user_agent.original", "enduser.id", "http.client_ip".
-func ServerRequest(server string, req *http.Request) []attribute.KeyValue {
- return hc.ServerRequest(server, req)
-}
-
-// ServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func ServerStatus(code int) (codes.Code, string) {
- return hc.ServerStatus(code)
-}
-
-// RequestHeader returns the contents of h as attributes.
-//
-// Instrumentation should require an explicit configuration of which headers to
-// captured and then prune what they pass here. Including all headers can be a
-// security risk - explicit configuration helps avoid leaking sensitive
-// information.
-//
-// The User-Agent header is already captured in the user_agent.original attribute
-// from ClientRequest and ServerRequest. Instrumentation may provide an option
-// to capture that header here even though it is not recommended. Otherwise,
-// instrumentation should filter that out of what is passed.
-func RequestHeader(h http.Header) []attribute.KeyValue {
- return hc.RequestHeader(h)
-}
-
-// ResponseHeader returns the contents of h as attributes.
-//
-// Instrumentation should require an explicit configuration of which headers to
-// captured and then prune what they pass here. Including all headers can be a
-// security risk - explicit configuration helps avoid leaking sensitive
-// information.
-//
-// The User-Agent header is already captured in the user_agent.original attribute
-// from ClientRequest and ServerRequest. Instrumentation may provide an option
-// to capture that header here even though it is not recommended. Otherwise,
-// instrumentation should filter that out of what is passed.
-func ResponseHeader(h http.Header) []attribute.KeyValue {
- return hc.ResponseHeader(h)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
deleted file mode 100644
index 3d44dae27..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
+++ /dev/null
@@ -1,2060 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The web browser in which the application represented by the resource is
-// running. The `browser.*` attributes MUST be used only for resources that
-// represent applications running in a web browser (regardless of whether
-// running on a mobile or desktop device).
-const (
- // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
- // semantic conventions. It represents the array of brand name and version
- // separated by a space
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.brands`).
- BrowserBrandsKey = attribute.Key("browser.brands")
-
- // BrowserPlatformKey is the attribute Key conforming to the
- // "browser.platform" semantic conventions. It represents the platform on
- // which the browser is running
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Windows', 'macOS', 'Android'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.platform`). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute
- // SHOULD be left unset in order for the values to be consistent.
- // The list of possible values is defined in the [W3C User-Agent Client
- // Hints
- // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
- // Note that some (but not all) of these values can overlap with values in
- // the [`os.type` and `os.name` attributes](./os.md). However, for
- // consistency, the values in the `browser.platform` attribute should
- // capture the exact value that the user agent provides.
- BrowserPlatformKey = attribute.Key("browser.platform")
-
- // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
- // semantic conventions. It represents a boolean that is true if the
- // browser is running on a mobile device
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.mobile`). If unavailable, this attribute
- // SHOULD be left unset.
- BrowserMobileKey = attribute.Key("browser.mobile")
-
- // BrowserLanguageKey is the attribute Key conforming to the
- // "browser.language" semantic conventions. It represents the preferred
- // language of the user using the browser
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'en', 'en-US', 'fr', 'fr-FR'
- // Note: This value is intended to be taken from the Navigator API
- // `navigator.language`.
- BrowserLanguageKey = attribute.Key("browser.language")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
- return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
- return BrowserPlatformKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
- return BrowserMobileKey.Bool(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
- return BrowserLanguageKey.String(val)
-}
-
-// A cloud environment (e.g. GCP, Azure, AWS)
-const (
- // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
- // semantic conventions. It represents the name of the cloud provider.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- CloudProviderKey = attribute.Key("cloud.provider")
-
- // CloudAccountIDKey is the attribute Key conforming to the
- // "cloud.account.id" semantic conventions. It represents the cloud account
- // ID the resource is assigned to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
-
- // CloudRegionKey is the attribute Key conforming to the "cloud.region"
- // semantic conventions. It represents the geographical region the resource
- // is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for
- // example [Alibaba Cloud
- // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
- // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
- // [Azure
- // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
- // [Google Cloud regions](https://cloud.google.com/about/locations), or
- // [Tencent Cloud
- // regions](https://www.tencentcloud.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
-
- // CloudResourceIDKey is the attribute Key conforming to the
- // "cloud.resource_id" semantic conventions. It represents the cloud
- // provider-specific native identifier of the monitored cloud resource
- // (e.g. an
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // on AWS, a [fully qualified resource
- // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
- // on Azure, a [full resource
- // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
- // on GCP)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
- // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
- // '/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
- // Note: On some cloud providers, it may not be possible to determine the
- // full ID at startup,
- // so it may be necessary to set `cloud.resource_id` as a span attribute
- // instead.
- //
- // The exact value to use for `cloud.resource_id` depends on the cloud
- // provider.
- // The following well-known definitions MUST be used if you set this
- // attribute and they apply:
- //
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias
- // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
- // with the resolved function version, as the same runtime instance may
- // be invokable with
- // multiple different aliases.
- // * **GCP:** The [URI of the
- // resource](https://cloud.google.com/iam/docs/full-resource-names)
- // * **Azure:** The [Fully Qualified Resource
- // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
- // of the invoked function,
- // *not* the function app, having the form
- // `/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider.
- CloudResourceIDKey = attribute.Key("cloud.resource_id")
-
- // CloudAvailabilityZoneKey is the attribute Key conforming to the
- // "cloud.availability_zone" semantic conventions. It represents the cloud
- // regions often have multiple, isolated locations known as zones to
- // increase availability. Availability zone represents the zone where the
- // resource is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google
- // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
- // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
- // semantic conventions. It represents the cloud platform in use.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Heroku Platform as a Service
- CloudProviderHeroku = CloudProviderKey.String("heroku")
- // IBM Cloud
- CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // Red Hat OpenShift on Alibaba Cloud
- CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Red Hat OpenShift on AWS (ROSA)
- CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Azure Red Hat OpenShift
- CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Red Hat OpenShift on Google Cloud
- CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
- // Red Hat OpenShift on IBM Cloud
- CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
- return CloudAccountIDKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the
-// "cloud.region" semantic conventions. It represents the geographical region
-// the resource is running.
-func CloudRegion(val string) attribute.KeyValue {
- return CloudRegionKey.String(val)
-}
-
-// CloudResourceID returns an attribute KeyValue conforming to the
-// "cloud.resource_id" semantic conventions. It represents the cloud
-// provider-specific native identifier of the monitored cloud resource (e.g. an
-// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-// on AWS, a [fully qualified resource
-// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
-// on Azure, a [full resource
-// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
-// on GCP)
-func CloudResourceID(val string) attribute.KeyValue {
- return CloudResourceIDKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. It represents the cloud
-// regions often have multiple, isolated locations known as zones to increase
-// availability. Availability zone represents the zone where the resource is
-// running.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
- return CloudAvailabilityZoneKey.String(val)
-}
-
-// Resources used by AWS Elastic Container Service (ECS).
-const (
- // AWSECSContainerARNKey is the attribute Key conforming to the
- // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
- // Resource Name (ARN) of an [ECS container
- // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
- // AWSECSClusterARNKey is the attribute Key conforming to the
- // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
- // [ECS
- // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
- // AWSECSLaunchtypeKey is the attribute Key conforming to the
- // "aws.ecs.launchtype" semantic conventions. It represents the [launch
- // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
- // for an ECS task.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
- // AWSECSTaskARNKey is the attribute Key conforming to the
- // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
- // [ECS task
- // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
- // AWSECSTaskFamilyKey is the attribute Key conforming to the
- // "aws.ecs.task.family" semantic conventions. It represents the task
- // definition family this task definition is a member of.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
- // AWSECSTaskRevisionKey is the attribute Key conforming to the
- // "aws.ecs.task.revision" semantic conventions. It represents the revision
- // for this task definition.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
- return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
- return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
-// task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
-func AWSECSTaskARN(val string) attribute.KeyValue {
- return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the task
-// definition family this task definition is a member of.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
- return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// this task definition.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
- return AWSECSTaskRevisionKey.String(val)
-}
-
-// Resources used by AWS Elastic Kubernetes Service (EKS).
-const (
- // AWSEKSClusterARNKey is the attribute Key conforming to the
- // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
- // EKS cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
- return AWSEKSClusterARNKey.String(val)
-}
-
-// Resources specific to Amazon Web Services.
-const (
- // AWSLogGroupNamesKey is the attribute Key conforming to the
- // "aws.log.group.names" semantic conventions. It represents the name(s) of
- // the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like
- // multi-container applications, where a single application has sidecar
- // containers, and each write to their own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
- // AWSLogGroupARNsKey is the attribute Key conforming to the
- // "aws.log.group.arns" semantic conventions. It represents the Amazon
- // Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
- // AWSLogStreamNamesKey is the attribute Key conforming to the
- // "aws.log.stream.names" semantic conventions. It represents the name(s)
- // of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-
- // AWSLogStreamARNsKey is the attribute Key conforming to the
- // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
- // the AWS log stream(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- // One log group can contain several log streams, so these ARNs necessarily
- // identify both a log group and a log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-)
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
- return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
- return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
- return AWSLogStreamNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
- return AWSLogStreamARNsKey.StringSlice(val)
-}
-
-// Heroku dyno metadata
-const (
- // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
- // "heroku.release.creation_timestamp" semantic conventions. It represents
- // the time and date the release was created
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2022-10-23T18:00:42Z'
- HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-
- // HerokuReleaseCommitKey is the attribute Key conforming to the
- // "heroku.release.commit" semantic conventions. It represents the commit
- // hash for the current release
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
- HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
- // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
- // semantic conventions. It represents the unique identifier for the
- // application
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
- HerokuAppIDKey = attribute.Key("heroku.app.id")
-)
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
- return HerokuReleaseCreationTimestampKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
- return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
- return HerokuAppIDKey.String(val)
-}
-
-// A container instance.
-const (
- // ContainerNameKey is the attribute Key conforming to the "container.name"
- // semantic conventions. It represents the container name used by container
- // runtime.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
-
- // ContainerIDKey is the attribute Key conforming to the "container.id"
- // semantic conventions. It represents the container ID. Usually a UUID, as
- // for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-identification).
- // The UUID might be abbreviated.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
-
- // ContainerRuntimeKey is the attribute Key conforming to the
- // "container.runtime" semantic conventions. It represents the container
- // runtime managing this container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
-
- // ContainerImageNameKey is the attribute Key conforming to the
- // "container.image.name" semantic conventions. It represents the name of
- // the image the container was built on.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
-
- // ContainerImageTagKey is the attribute Key conforming to the
- // "container.image.tag" semantic conventions. It represents the container
- // image tag.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0.1'
- ContainerImageTagKey = attribute.Key("container.image.tag")
-)
-
-// ContainerName returns an attribute KeyValue conforming to the
-// "container.name" semantic conventions. It represents the container name used
-// by container runtime.
-func ContainerName(val string) attribute.KeyValue {
- return ContainerNameKey.String(val)
-}
-
-// ContainerID returns an attribute KeyValue conforming to the
-// "container.id" semantic conventions. It represents the container ID. Usually
-// a UUID, as for example used to [identify Docker
-// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-// The UUID might be abbreviated.
-func ContainerID(val string) attribute.KeyValue {
- return ContainerIDKey.String(val)
-}
-
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container
-// runtime managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
- return ContainerRuntimeKey.String(val)
-}
-
-// ContainerImageName returns an attribute KeyValue conforming to the
-// "container.image.name" semantic conventions. It represents the name of the
-// image the container was built on.
-func ContainerImageName(val string) attribute.KeyValue {
- return ContainerImageNameKey.String(val)
-}
-
-// ContainerImageTag returns an attribute KeyValue conforming to the
-// "container.image.tag" semantic conventions. It represents the container
-// image tag.
-func ContainerImageTag(val string) attribute.KeyValue {
- return ContainerImageTagKey.String(val)
-}
-
-// The software deployment.
-const (
- // DeploymentEnvironmentKey is the attribute Key conforming to the
- // "deployment.environment" semantic conventions. It represents the name of
- // the [deployment
- // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'staging', 'production'
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment
-// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
-// deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
- return DeploymentEnvironmentKey.String(val)
-}
-
-// The device on which the process represented by this resource is running.
-const (
- // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
- // conventions. It represents a unique identifier representing the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values
- // outlined below. This value is not an advertising identifier and MUST NOT
- // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
- // to the [vendor
- // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
- // On Android (Java or Kotlin), this value MUST be equal to the Firebase
- // Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on
- // best practices and exact implementation details. Caution should be taken
- // when storing personal data or anything which can identify a user. GDPR
- // and data protection laws may apply, ensure you do your own due
- // diligence.
- DeviceIDKey = attribute.Key("device.id")
-
- // DeviceModelIdentifierKey is the attribute Key conforming to the
- // "device.model.identifier" semantic conventions. It represents the model
- // identifier for the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine readable version
- // of the model identifier rather than the market or consumer-friendly name
- // of the device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
-
- // DeviceModelNameKey is the attribute Key conforming to the
- // "device.model.name" semantic conventions. It represents the marketing
- // name for the device model
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human readable version of
- // the device model rather than a machine readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
-
- // DeviceManufacturerKey is the attribute Key conforming to the
- // "device.manufacturer" semantic conventions. It represents the name of
- // the device manufacturer
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-)
-
-// DeviceID returns an attribute KeyValue conforming to the "device.id"
-// semantic conventions. It represents a unique identifier representing the
-// device
-func DeviceID(val string) attribute.KeyValue {
- return DeviceIDKey.String(val)
-}
-
-// DeviceModelIdentifier returns an attribute KeyValue conforming to the
-// "device.model.identifier" semantic conventions. It represents the model
-// identifier for the device
-func DeviceModelIdentifier(val string) attribute.KeyValue {
- return DeviceModelIdentifierKey.String(val)
-}
-
-// DeviceModelName returns an attribute KeyValue conforming to the
-// "device.model.name" semantic conventions. It represents the marketing name
-// for the device model
-func DeviceModelName(val string) attribute.KeyValue {
- return DeviceModelNameKey.String(val)
-}
-
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
- return DeviceManufacturerKey.String(val)
-}
-
-// A serverless instance.
-const (
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run:** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
-
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string, that will be potentially reused for other invocations to the
- // same function/function version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function converted to Bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 134217728
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-)
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string, that will be potentially reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
-
-// A host is defined as a general computing instance.
-const (
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostNameKey is the attribute Key conforming to the "host.name" semantic
- // conventions. It represents the name of the host. On Unix systems, it may
- // contain what the hostname command returns, or the fully qualified
- // hostname, or another name specified by the user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
-
- // HostTypeKey is the attribute Key conforming to the "host.type" semantic
- // conventions. It represents the type of host. For Cloud, this must be the
- // machine type.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
-
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- HostArchKey = attribute.Key("host.arch")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the vM image ID. For Cloud, this
- // value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image as defined in [Version
- // Attributes](README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the vM image ID. For
-// Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image as defined in [Version
-// Attributes](README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// A Kubernetes Cluster.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
- return K8SClusterNameKey.String(val)
-}
-
-// A Kubernetes Node object.
-const (
- // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
- // semantic conventions. It represents the name of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
-
- // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
- // semantic conventions. It represents the UID of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-)
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
- return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
- return K8SNodeUIDKey.String(val)
-}
-
-// A Kubernetes Namespace.
-const (
- // K8SNamespaceNameKey is the attribute Key conforming to the
- // "k8s.namespace.name" semantic conventions. It represents the name of the
- // namespace that the pod is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-)
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
- return K8SNamespaceNameKey.String(val)
-}
-
-// A Kubernetes Pod object.
-const (
- // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
- // semantic conventions. It represents the UID of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
- // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
- // semantic conventions. It represents the name of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-)
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
- return K8SPodUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
- return K8SPodNameKey.String(val)
-}
-
-// A container in a
-// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
-const (
- // K8SContainerNameKey is the attribute Key conforming to the
- // "k8s.container.name" semantic conventions. It represents the name of the
- // Container from Pod specification, must be unique within a Pod. Container
- // runtime usually uses different globally unique name (`container.name`).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
-
- // K8SContainerRestartCountKey is the attribute Key conforming to the
- // "k8s.container.restart_count" semantic conventions. It represents the
- // number of times the container was restarted. This attribute can be used
- // to identify a particular container (running or stopped) within a
- // container spec.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 0, 2
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-)
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from Pod specification, must be unique within a Pod. Container
-// runtime usually uses different globally unique name (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
- return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
- return K8SContainerRestartCountKey.Int(val)
-}
-
-// A Kubernetes ReplicaSet object.
-const (
- // K8SReplicaSetUIDKey is the attribute Key conforming to the
- // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
- // ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
- // K8SReplicaSetNameKey is the attribute Key conforming to the
- // "k8s.replicaset.name" semantic conventions. It represents the name of
- // the ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-)
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
- return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
- return K8SReplicaSetNameKey.String(val)
-}
-
-// A Kubernetes Deployment object.
-const (
- // K8SDeploymentUIDKey is the attribute Key conforming to the
- // "k8s.deployment.uid" semantic conventions. It represents the UID of the
- // Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
- // K8SDeploymentNameKey is the attribute Key conforming to the
- // "k8s.deployment.name" semantic conventions. It represents the name of
- // the Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-)
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
- return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
- return K8SDeploymentNameKey.String(val)
-}
-
-// A Kubernetes StatefulSet object.
-const (
- // K8SStatefulSetUIDKey is the attribute Key conforming to the
- // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
- // StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-
- // K8SStatefulSetNameKey is the attribute Key conforming to the
- // "k8s.statefulset.name" semantic conventions. It represents the name of
- // the StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-)
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
- return K8SStatefulSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
- return K8SStatefulSetNameKey.String(val)
-}
-
-// A Kubernetes DaemonSet object.
-const (
- // K8SDaemonSetUIDKey is the attribute Key conforming to the
- // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
- // K8SDaemonSetNameKey is the attribute Key conforming to the
- // "k8s.daemonset.name" semantic conventions. It represents the name of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-)
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
- return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
- return K8SDaemonSetNameKey.String(val)
-}
-
-// A Kubernetes Job object.
-const (
- // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
- // semantic conventions. It represents the UID of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
- // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
- // semantic conventions. It represents the name of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-)
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
- return K8SJobUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
- return K8SJobNameKey.String(val)
-}
-
-// A Kubernetes CronJob object.
-const (
- // K8SCronJobUIDKey is the attribute Key conforming to the
- // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
- // K8SCronJobNameKey is the attribute Key conforming to the
- // "k8s.cronjob.name" semantic conventions. It represents the name of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-)
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
- return K8SCronJobUIDKey.String(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
- return K8SCronJobNameKey.String(val)
-}
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
- // OSTypeKey is the attribute Key conforming to the "os.type" semantic
- // conventions. It represents the operating system type.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- OSTypeKey = attribute.Key("os.type")
-
- // OSDescriptionKey is the attribute Key conforming to the "os.description"
- // semantic conventions. It represents the human readable (not intended to
- // be parsed) OS version information, like e.g. reported by `ver` or
- // `lsb_release -a` commands.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
- // LTS'
- OSDescriptionKey = attribute.Key("os.description")
-
- // OSNameKey is the attribute Key conforming to the "os.name" semantic
- // conventions. It represents the human readable operating system name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
-
- // OSVersionKey is the attribute Key conforming to the "os.version"
- // semantic conventions. It represents the version string of the operating
- // system as defined in [Version
- // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, like e.g. reported by `ver`
-// or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
- return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
- return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
- return OSVersionKey.String(val)
-}
-
-// An operating system process.
-const (
- // ProcessPIDKey is the attribute Key conforming to the "process.pid"
- // semantic conventions. It represents the process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
-
- // ProcessParentPIDKey is the attribute Key conforming to the
- // "process.parent_pid" semantic conventions. It represents the parent
- // Process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 111
- ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
- // ProcessExecutableNameKey is the attribute Key conforming to the
- // "process.executable.name" semantic conventions. It represents the name
- // of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
- // of `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
- // ProcessExecutablePathKey is the attribute Key conforming to the
- // "process.executable.path" semantic conventions. It represents the full
- // path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
- // "process.command_line" semantic conventions. It represents the full
- // command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
- // "process.command_args" semantic conventions. It represents the all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otecol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-)
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
-
-// The single (language) runtime instance which is monitored.
-const (
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-)
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
- return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
- return ProcessRuntimeVersionKey.String(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
- return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// A service instance.
-const (
- // ServiceNameKey is the attribute Key conforming to the "service.name"
- // semantic conventions. It represents the logical name of the service.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled
- // services. If the value was not specified, SDKs MUST fallback to
- // `unknown_service:` concatenated with
- // [`process.executable.name`](process.md#process), e.g.
- // `unknown_service:bash`. If `process.executable.name` is not available,
- // the value MUST be set to `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
-)
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
- return ServiceNameKey.String(val)
-}
-
-// A service instance.
-const (
- // ServiceNamespaceKey is the attribute Key conforming to the
- // "service.namespace" semantic conventions. It represents a namespace for
- // `service.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group
- // of services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name`
- // is expected to be unique for all services that have no explicit
- // namespace defined (so the empty/unspecified namespace is simply one more
- // valid namespace). Zero-length namespace string is assumed equal to
- // unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
-
- // ServiceInstanceIDKey is the attribute Key conforming to the
- // "service.instance.id" semantic conventions. It represents the string ID
- // of the service instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'my-k8s-pod-deployment-1',
- // '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words
- // `service.namespace,service.name,service.instance.id` triplet MUST be
- // globally unique). The ID helps to distinguish instances of the same
- // service that exist at the same time (e.g. instances of a horizontally
- // scaled service). It is preferable for the ID to be persistent and stay
- // the same for the lifetime of the service instance, however it is
- // acceptable that the ID is ephemeral and changes during important
- // lifetime events for the service (e.g. service restarts). If the service
- // has no inherent unique ID that can be used as the value of this
- // attribute it is recommended to generate a random Version 1 or Version 4
- // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
- // Version 5, see RFC 4122 for more recommendations).
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
- // ServiceVersionKey is the attribute Key conforming to the
- // "service.version" semantic conventions. It represents the version string
- // of the service API or implementation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2.0.0'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
- return ServiceNamespaceKey.String(val)
-}
-
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
- return ServiceInstanceIDKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation.
-func ServiceVersion(val string) attribute.KeyValue {
- return ServiceVersionKey.String(val)
-}
-
-// The telemetry SDK used to capture data recorded by the instrumentation
-// libraries.
-const (
- // TelemetrySDKNameKey is the attribute Key conforming to the
- // "telemetry.sdk.name" semantic conventions. It represents the name of the
- // telemetry SDK as defined above.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'opentelemetry'
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
- // TelemetrySDKLanguageKey is the attribute Key conforming to the
- // "telemetry.sdk.language" semantic conventions. It represents the
- // language of the telemetry SDK.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
- // TelemetrySDKVersionKey is the attribute Key conforming to the
- // "telemetry.sdk.version" semantic conventions. It represents the version
- // string of the telemetry SDK.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
- return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
- return TelemetrySDKVersionKey.String(val)
-}
-
-// The telemetry SDK used to capture data recorded by the instrumentation
-// libraries.
-const (
- // TelemetryAutoVersionKey is the attribute Key conforming to the
- // "telemetry.auto.version" semantic conventions. It represents the version
- // string of the auto instrumentation agent, if used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.2.3'
- TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
-)
-
-// TelemetryAutoVersion returns an attribute KeyValue conforming to the
-// "telemetry.auto.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent, if used.
-func TelemetryAutoVersion(val string) attribute.KeyValue {
- return TelemetryAutoVersionKey.String(val)
-}
-
-// Resource describing the packaged software running the application code. Web
-// engines are typically executed using process.runtime.
-const (
- // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
- // semantic conventions. It represents the name of the web engine.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
-
- // WebEngineVersionKey is the attribute Key conforming to the
- // "webengine.version" semantic conventions. It represents the version of
- // the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
-
- // WebEngineDescriptionKey is the attribute Key conforming to the
- // "webengine.description" semantic conventions. It represents the
- // additional description of the web engine (e.g. detailed version and
- // edition information).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-)
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OTelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OTelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0.0'
- OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
- return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
- return OTelScopeVersionKey.String(val)
-}
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry
-// Scope's concepts.
-const (
- // OTelLibraryNameKey is the attribute Key conforming to the
- // "otel.library.name" semantic conventions. It represents the deprecated,
- // use the `otel.scope.name` attribute.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelLibraryNameKey = attribute.Key("otel.library.name")
-
- // OTelLibraryVersionKey is the attribute Key conforming to the
- // "otel.library.version" semantic conventions. It represents the
- // deprecated, use the `otel.scope.version` attribute.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '1.0.0'
- OTelLibraryVersionKey = attribute.Key("otel.library.version")
-)
-
-// OTelLibraryName returns an attribute KeyValue conforming to the
-// "otel.library.name" semantic conventions. It represents the deprecated, use
-// the `otel.scope.name` attribute.
-func OTelLibraryName(val string) attribute.KeyValue {
- return OTelLibraryNameKey.String(val)
-}
-
-// OTelLibraryVersion returns an attribute KeyValue conforming to the
-// "otel.library.version" semantic conventions. It represents the deprecated,
-// use the `otel.scope.version` attribute.
-func OTelLibraryVersion(val string) attribute.KeyValue {
- return OTelLibraryVersionKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
deleted file mode 100644
index 95d0210e3..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
-const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
deleted file mode 100644
index 90b1b0452..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
+++ /dev/null
@@ -1,2599 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-)
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// The attributes described in this section are rather generic. They may be
-// used in any Log Record they apply to.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means, that two
- // distinguishable log records MUST have different values.
- // The id MAY be an [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
-
-// Span attributes used by AWS Lambda (in addition to general `faas`
-// attributes).
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the
- // `/runtime/invocation/next` applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `cloud.resource_id` if an alias is
- // involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for CloudEvents. CloudEvents is a specification on how to define
-// event data in a standard way. These attributes can be attached to spans when
-// performing operations with CloudEvents, regardless of the protocol being
-// used.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the
- // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
- // uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the
- // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
- // identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the
- // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
- // contains a value describing the type of event related to the originating
- // occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-// uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
-// identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
-// contains a value describing the type of event related to the originating
-// occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// Semantic conventions for the OpenTracing Shim
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span does not depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// The attributes used to perform database client calls.
-const (
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents an identifier for the database management
- // system (DBMS) product being used. See below for a list of well-known
- // identifiers.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- DBSystemKey = attribute.Key("db.system")
-
- // DBConnectionStringKey is the attribute Key conforming to the
- // "db.connection_string" semantic conventions. It represents the
- // connection string used to connect to the database. It is recommended to
- // remove embedded credentials.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
- DBConnectionStringKey = attribute.Key("db.connection_string")
-
- // DBUserKey is the attribute Key conforming to the "db.user" semantic
- // conventions. It represents the username for accessing the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'readonly_user', 'reporting_user'
- DBUserKey = attribute.Key("db.user")
-
- // DBJDBCDriverClassnameKey is the attribute Key conforming to the
- // "db.jdbc.driver_classname" semantic conventions. It represents the
- // fully-qualified class name of the [Java Database Connectivity
- // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
- // driver used to connect.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'org.postgresql.Driver',
- // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
- DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
-
- // DBNameKey is the attribute Key conforming to the "db.name" semantic
- // conventions. It represents the this attribute is used to report the name
- // of the database being accessed. For commands that switch the database,
- // this should be set to the target database (even if the command fails).
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If applicable.)
- // Stability: stable
- // Examples: 'customers', 'main'
- // Note: In some SQL databases, the database name to be used is called
- // "schema name". In case there are multiple layers that could be
- // considered for database name (e.g. Oracle instance name and schema
- // name), the database name to be used is the more specific layer (e.g.
- // Oracle schema name).
- DBNameKey = attribute.Key("db.name")
-
- // DBStatementKey is the attribute Key conforming to the "db.statement"
- // semantic conventions. It represents the database statement being
- // executed.
- //
- // Type: string
- // RequirementLevel: Recommended (Should be collected by default only if
- // there is sanitization that excludes sensitive information.)
- // Stability: stable
- // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
- DBStatementKey = attribute.Key("db.statement")
-
- // DBOperationKey is the attribute Key conforming to the "db.operation"
- // semantic conventions. It represents the name of the operation being
- // executed, e.g. the [MongoDB command
- // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
- // such as `findAndModify`, or the SQL keyword.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If `db.statement` is not
- // applicable.)
- // Stability: stable
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: When setting this to an SQL keyword, it is not recommended to
- // attempt any client-side parsing of `db.statement` just to get this
- // property, but it should be set if the operation name is provided by the
- // library being instrumented. If the SQL statement has an ambiguous
- // operation, or performs more than one operation, this value may be
- // omitted.
- DBOperationKey = attribute.Key("db.operation")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // Microsoft SQL Server Compact
- DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
- // OpenSearch
- DBSystemOpensearch = DBSystemKey.String("opensearch")
- // ClickHouse
- DBSystemClickhouse = DBSystemKey.String("clickhouse")
- // Cloud Spanner
- DBSystemSpanner = DBSystemKey.String("spanner")
- // Trino
- DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBConnectionString returns an attribute KeyValue conforming to the
-// "db.connection_string" semantic conventions. It represents the connection
-// string used to connect to the database. It is recommended to remove embedded
-// credentials.
-func DBConnectionString(val string) attribute.KeyValue {
- return DBConnectionStringKey.String(val)
-}
-
-// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
-// conventions. It represents the username for accessing the database.
-func DBUser(val string) attribute.KeyValue {
- return DBUserKey.String(val)
-}
-
-// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
-// "db.jdbc.driver_classname" semantic conventions. It represents the
-// fully-qualified class name of the [Java Database Connectivity
-// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
-// used to connect.
-func DBJDBCDriverClassname(val string) attribute.KeyValue {
- return DBJDBCDriverClassnameKey.String(val)
-}
-
-// DBName returns an attribute KeyValue conforming to the "db.name" semantic
-// conventions. It represents the this attribute is used to report the name of
-// the database being accessed. For commands that switch the database, this
-// should be set to the target database (even if the command fails).
-func DBName(val string) attribute.KeyValue {
- return DBNameKey.String(val)
-}
-
-// DBStatement returns an attribute KeyValue conforming to the
-// "db.statement" semantic conventions. It represents the database statement
-// being executed.
-func DBStatement(val string) attribute.KeyValue {
- return DBStatementKey.String(val)
-}
-
-// DBOperation returns an attribute KeyValue conforming to the
-// "db.operation" semantic conventions. It represents the name of the operation
-// being executed, e.g. the [MongoDB command
-// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
-// such as `findAndModify`, or the SQL keyword.
-func DBOperation(val string) attribute.KeyValue {
- return DBOperationKey.String(val)
-}
-
-// Connection-level attributes for Microsoft SQL Server
-const (
- // DBMSSQLInstanceNameKey is the attribute Key conforming to the
- // "db.mssql.instance_name" semantic conventions. It represents the
- // Microsoft SQL Server [instance
- // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
- // connecting to. This name is used to determine the port of a named
- // instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MSSQLSERVER'
- // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
- // longer required (but still recommended if non-standard).
- DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
-)
-
-// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
-// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
-// SQL Server [instance
-// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
-// connecting to. This name is used to determine the port of a named instance.
-func DBMSSQLInstanceName(val string) attribute.KeyValue {
- return DBMSSQLInstanceNameKey.String(val)
-}
-
-// Call-level attributes for Cassandra
-const (
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraTableKey is the attribute Key conforming to the
- // "db.cassandra.table" semantic conventions. It represents the name of the
- // primary table that the operation is acting upon, including the keyspace
- // name (if applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'mytable'
- // Note: This mirrors the db.sql.table attribute but references cassandra
- // rather than sql. It is not recommended to attempt any client-side
- // parsing of `db.statement` just to get this property, but it should be
- // set if it is provided by the library being instrumented. If the
- // operation is acting upon an anonymous table, or more than one table,
- // this value MUST NOT be set.
- DBCassandraTableKey = attribute.Key("db.cassandra.table")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents the
- // whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraTable returns an attribute KeyValue conforming to the
-// "db.cassandra.table" semantic conventions. It represents the name of the
-// primary table that the operation is acting upon, including the keyspace name
-// (if applicable).
-func DBCassandraTable(val string) attribute.KeyValue {
- return DBCassandraTableKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// Call-level attributes for Redis
-const (
- // DBRedisDBIndexKey is the attribute Key conforming to the
- // "db.redis.database_index" semantic conventions. It represents the index
- // of the database being accessed as used in the [`SELECT`
- // command](https://redis.io/commands/select), provided as an integer. To
- // be used instead of the generic `db.name` attribute.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If other than the default
- // database (`0`).)
- // Stability: stable
- // Examples: 0, 1, 15
- DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
-)
-
-// DBRedisDBIndex returns an attribute KeyValue conforming to the
-// "db.redis.database_index" semantic conventions. It represents the index of
-// the database being accessed as used in the [`SELECT`
-// command](https://redis.io/commands/select), provided as an integer. To be
-// used instead of the generic `db.name` attribute.
-func DBRedisDBIndex(val int) attribute.KeyValue {
- return DBRedisDBIndexKey.Int(val)
-}
-
-// Call-level attributes for MongoDB
-const (
- // DBMongoDBCollectionKey is the attribute Key conforming to the
- // "db.mongodb.collection" semantic conventions. It represents the
- // collection being accessed within the database stated in `db.name`.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'customers', 'products'
- DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
-)
-
-// DBMongoDBCollection returns an attribute KeyValue conforming to the
-// "db.mongodb.collection" semantic conventions. It represents the collection
-// being accessed within the database stated in `db.name`.
-func DBMongoDBCollection(val string) attribute.KeyValue {
- return DBMongoDBCollectionKey.String(val)
-}
-
-// Call-level attributes for SQL databases
-const (
- // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
- // semantic conventions. It represents the name of the primary table that
- // the operation is acting upon, including the database name (if
- // applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'public.users', 'customers'
- // Note: It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting
- // upon an anonymous table, or more than one table, this value MUST NOT be
- // set.
- DBSQLTableKey = attribute.Key("db.sql.table")
-)
-
-// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
-// semantic conventions. It represents the name of the primary table that the
-// operation is acting upon, including the database name (if applicable).
-func DBSQLTable(val string) attribute.KeyValue {
- return DBSQLTableKey.String(val)
-}
-
-// Call-level attributes for Cosmos DB.
-const (
- // DBCosmosDBClientIDKey is the attribute Key conforming to the
- // "db.cosmosdb.client_id" semantic conventions. It represents the unique
- // Cosmos client instance id.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
- DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
-
- // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
- // "db.cosmosdb.operation_type" semantic conventions. It represents the
- // cosmosDB Operation Type.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (when performing one of the
- // operations in this list)
- // Stability: stable
- DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
-
- // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
- // "db.cosmosdb.connection_mode" semantic conventions. It represents the
- // cosmos client connection mode.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as
- // default))
- // Stability: stable
- DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
-
- // DBCosmosDBContainerKey is the attribute Key conforming to the
- // "db.cosmosdb.container" semantic conventions. It represents the cosmos
- // DB container name.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if available)
- // Stability: stable
- // Examples: 'anystring'
- DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
-
- // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
- // "db.cosmosdb.request_content_length" semantic conventions. It represents
- // the request payload size in bytes
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
-
- // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
- // DB status code.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (if response was received)
- // Stability: stable
- // Examples: 200, 201
- DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
-
- // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
- // cosmos DB sub status code.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (when response was received and
- // contained sub-code.)
- // Stability: stable
- // Examples: 1000, 1002
- DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
-
- // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
- // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
- // consumed for that operation
- //
- // Type: double
- // RequirementLevel: ConditionallyRequired (when available)
- // Stability: stable
- // Examples: 46.18, 1.0
- DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-)
-
-var (
- // invalid
- DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
- // create
- DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
- // patch
- DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
- // read
- DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
- // read_feed
- DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
- // delete
- DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
- // replace
- DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
- // execute
- DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
- // query
- DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
- // head
- DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
- // head_feed
- DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
- // upsert
- DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
- // batch
- DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
- // query_plan
- DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
- // execute_javascript
- DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-var (
- // Gateway (HTTP) connections mode
- DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
- // Direct connection
- DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
- return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBContainer returns an attribute KeyValue conforming to the
-// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
-// container name.
-func DBCosmosDBContainer(val string) attribute.KeyValue {
- return DBCosmosDBContainerKey.String(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
- return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
-// consumed for that operation
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
- return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
-// concepts.
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
-
-// This semantic convention describes an instance of a function that runs
-// without provisioning or managing of servers (also known as serverless
-// functions or Function as a Service (FaaS)) with spans.
-const (
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: For the server/consumer span on the incoming side,
- // `faas.trigger` MUST be set.
- //
- // Clients invoking FaaS instances usually cannot set `faas.trigger`,
- // since they would typically need to look in the payload to determine
- // the event type. If clients set it, it should be the same as the
- // trigger that corresponding incoming would have (i.e., this has
- // nothing to do with the underlying transport used to make the API
- // call to invoke the lambda, which is often HTTP).
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// Semantic Convention for FaaS triggered as a response to some data source
-// operation such as a database or filesystem read/write.
-const (
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It represents the
- // describes the type of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
-// database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
-
-// Semantic Convention for FaaS scheduled to be executed regularly.
-const (
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-)
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
-
-// Contains additional attributes for incoming FaaS spans.
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
- return FaaSColdstartKey.Bool(val)
-}
-
-// Contains additional attributes for outgoing FaaS spans.
-const (
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (For some cloud providers, like
- // AWS or GCP, the region in which a function is hosted is essential to
- // uniquely identify the function and also part of its endpoint. Since it's
- // part of the endpoint being called, the region is always known to
- // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
- // If the region is unknown to the client or not required for identifying
- // the invoked function, setting `faas.invoked_region` is optional.)
- // Stability: stable
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
-
-// Operations that access some remote service.
-const (
- // PeerServiceKey is the attribute Key conforming to the "peer.service"
- // semantic conventions. It represents the
- // [`service.name`](../../resource/semantic_conventions/README.md#service)
- // of the remote service. SHOULD be equal to the actual `service.name`
- // resource attribute of the remote service if any.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](../../resource/semantic_conventions/README.md#service) of
-// the remote service. SHOULD be equal to the actual `service.name` resource
-// attribute of the remote service if any.
-func PeerService(val string) attribute.KeyValue {
- return PeerServiceKey.String(val)
-}
-
-// These attributes may be used for any operation with an authenticated and/or
-// authorized enduser.
-const (
- // EnduserIDKey is the attribute Key conforming to the "enduser.id"
- // semantic conventions. It represents the username or client_id extracted
- // from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
- // in the inbound request from outside the system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
-
- // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
- // semantic conventions. It represents the actual/assumed role the client
- // is making the request under extracted from token or application security
- // context.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
-
- // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
- // semantic conventions. It represents the scopes or granted authorities
- // the client currently possesses extracted from token or application
- // security context. The value would come from the scope associated with an
- // [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
- // value in a [SAML 2.0
- // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
- return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under extracted from token or application
-// security context.
-func EnduserRole(val string) attribute.KeyValue {
- return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses extracted from token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// These attributes allow to report this unit of code and therefore to provide
-// more context about the span.
-const (
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 16
- CodeColumnKey = attribute.Key("code.column")
-)
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
- return CodeFunctionKey.String(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
- return CodeNamespaceKey.String(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
- return CodeFilepathKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
- return CodeLineNumberKey.Int(val)
-}
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
- return CodeColumnKey.Int(val)
-}
-
-// Semantic Convention for HTTP Client
-const (
- // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
- // conventions. It represents the full HTTP request URL in the form
- // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
- // not transmitted over HTTP, but if it is known, it should be included
- // nevertheless.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
- // Note: `http.url` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case the
- // attribute's value should be `https://www.example.com/`.
- HTTPURLKey = attribute.Key("http.url")
-
- // HTTPResendCountKey is the attribute Key conforming to the
- // "http.resend_count" semantic conventions. It represents the ordinal
- // number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Recommended (if and only if request was retried.)
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g. redirection, authorization failure, 503 Server Unavailable,
- // network issues, or any other).
- HTTPResendCountKey = attribute.Key("http.resend_count")
-)
-
-// HTTPURL returns an attribute KeyValue conforming to the "http.url"
-// semantic conventions. It represents the full HTTP request URL in the form
-// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
-// transmitted over HTTP, but if it is known, it should be included
-// nevertheless.
-func HTTPURL(val string) attribute.KeyValue {
- return HTTPURLKey.String(val)
-}
-
-// HTTPResendCount returns an attribute KeyValue conforming to the
-// "http.resend_count" semantic conventions. It represents the ordinal number
-// of request resending attempt (for any reason, including redirects).
-func HTTPResendCount(val int) attribute.KeyValue {
- return HTTPResendCountKey.Int(val)
-}
-
-// Semantic Convention for HTTP Server
-const (
- // HTTPTargetKey is the attribute Key conforming to the "http.target"
- // semantic conventions. It represents the full request target as passed in
- // a HTTP request line or equivalent.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '/users/12314/?q=ddds'
- HTTPTargetKey = attribute.Key("http.target")
-
- // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
- // semantic conventions. It represents the IP address of the original
- // client behind all proxies, if known (e.g. from
- // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '83.164.160.102'
- // Note: This is not necessarily the same as `net.sock.peer.addr`, which
- // would
- // identify the network-level peer, which may be a proxy.
- //
- // This attribute should be set when a source of information different
- // from the one used for `net.sock.peer.addr`, is available even if that
- // other
- // source just confirms the same value as `net.sock.peer.addr`.
- // Rationale: For `net.sock.peer.addr`, one typically does not know if it
- // comes from a proxy, reverse proxy, or the actual client. Setting
- // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
- // one is at least somewhat confident that the address is not that of
- // the closest proxy.
- HTTPClientIPKey = attribute.Key("http.client_ip")
-)
-
-// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
-// semantic conventions. It represents the full request target as passed in a
-// HTTP request line or equivalent.
-func HTTPTarget(val string) attribute.KeyValue {
- return HTTPTargetKey.String(val)
-}
-
-// HTTPClientIP returns an attribute KeyValue conforming to the
-// "http.client_ip" semantic conventions. It represents the IP address of the
-// original client behind all proxies, if known (e.g. from
-// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
-func HTTPClientIP(val string) attribute.KeyValue {
- return HTTPClientIPKey.String(val)
-}
-
-// The `aws` conventions apply to operations using the AWS SDK. They map
-// request or response parameters in AWS SDK API calls to attributes on a Span.
-// The conventions have been collected over time based on feedback from AWS
-// users of tracing and will continue to evolve as new interesting conventions
-// are found.
-// Some descriptions are also provided for populating general OpenTelemetry
-// semantic conventions based on these APIs.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
- AWSRequestIDKey = attribute.Key("aws.request_id")
-)
-
-// AWSRequestID returns an attribute KeyValue conforming to the
-// "aws.request_id" semantic conventions. It represents the AWS request ID as
-// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
-func AWSRequestID(val string) attribute.KeyValue {
- return AWSRequestIDKey.String(val)
-}
-
-// Attributes that exist for multiple DynamoDB request types.
-const (
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys
- // in the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
- // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number }, "TableName": "string",
- // "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
- // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
- // represents the JSON-serialized value of the `ItemCollectionMetrics`
- // response field.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
- // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
- // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
- // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
- // "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
- // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
- // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
- // request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
- // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
- // It represents the value of the
- // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the
- // value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value
- // of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
- // RelatedItems, ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of
- // the `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value
- // of the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of
- // the `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-)
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
-
-// DynamoDB.CreateTable
-const (
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// DynamoDB.ListTables
-const (
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the the
-// number of items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// DynamoDB.Query
-const (
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the
- // value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-)
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// DynamoDB.Scan
-const (
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of
- // the `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the
- // value of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of
- // the `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the
- // value of the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-)
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// DynamoDB.UpdateTable
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in the
- // the `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// Attributes that exist for S3 request types.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e. that require the object key as a mandatory parameter.
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // -
- // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
- // -
- // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
- // -
- // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
- // -
- // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
- // -
- // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3KeyKey = attribute.Key("aws.s3.key")
-
- // AWSS3CopySourceKey is the attribute Key conforming to the
- // "aws.s3.copy_source" semantic conventions. It represents the source
- // object (in the form `bucket`/`key`) for the copy operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'someFile.yml'
- // Note: The `copy_source` attribute applies to S3 copy operations and
- // corresponds to the `--copy-source` parameter
- // of the [copy-object operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
- // AWSS3UploadIDKey is the attribute Key conforming to the
- // "aws.s3.upload_id" semantic conventions. It represents the upload ID
- // that identifies the multipart upload.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
- // Note: The `upload_id` attribute applies to S3 multipart-upload
- // operations and corresponds to the `--upload-id` parameter
- // of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // multipart operations.
- // This applies in particular to the following operations:
- //
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
- // Note: The `delete` attribute is only applicable to the
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number
- // of the part being uploaded in a multipart-upload operation. This is a
- // positive integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // and
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- // operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter
- // of the
- // [upload-part operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
- return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
- return AWSS3UploadIDKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the
-// "aws.s3.delete" semantic conventions. It represents the delete request
-// container that specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
- return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
- return AWSS3PartNumberKey.Int(val)
-}
-
-// Semantic conventions to apply when instrumenting the GraphQL implementation.
-// They map GraphQL operations to attributes on a Span.
-const (
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
-// General attributes used in messaging systems.
-const (
- // MessagingSystemKey is the attribute Key conforming to the
- // "messaging.system" semantic conventions. It represents a string
- // identifying the messaging system.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
- MessagingSystemKey = attribute.Key("messaging.system")
-
- // MessagingOperationKey is the attribute Key conforming to the
- // "messaging.operation" semantic conventions. It represents a string
- // identifying the kind of messaging operation as defined in the [Operation
- // names](#operation-names) section above.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: If a custom value is used, it MUST be of low cardinality.
- MessagingOperationKey = attribute.Key("messaging.operation")
-
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the span describes an
- // operation on a batch of messages.)
- // Stability: stable
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message API for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-)
-
-var (
- // publish
- MessagingOperationPublish = MessagingOperationKey.String("publish")
- // receive
- MessagingOperationReceive = MessagingOperationKey.String("receive")
- // process
- MessagingOperationProcess = MessagingOperationKey.String("process")
-)
-
-// MessagingSystem returns an attribute KeyValue conforming to the
-// "messaging.system" semantic conventions. It represents a string identifying
-// the messaging system.
-func MessagingSystem(val string) attribute.KeyValue {
- return MessagingSystemKey.String(val)
-}
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
- return MessagingBatchMessageCountKey.Int(val)
-}
-
-// Semantic convention for a consumer of messages received from a messaging
-// system
-const (
- // MessagingConsumerIDKey is the attribute Key conforming to the
- // "messaging.consumer.id" semantic conventions. It represents the
- // identifier for the consumer receiving a message. For Kafka, set it to
- // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
- // both are present, or only `messaging.kafka.consumer.group`. For brokers,
- // such as RabbitMQ and Artemis, set it to the `client_id` of the client
- // consuming the message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'mygroup - client-6'
- MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
-)
-
-// MessagingConsumerID returns an attribute KeyValue conforming to the
-// "messaging.consumer.id" semantic conventions. It represents the identifier
-// for the consumer receiving a message. For Kafka, set it to
-// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
-// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
-// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
-// message.
-func MessagingConsumerID(val string) attribute.KeyValue {
- return MessagingConsumerIDKey.String(val)
-}
-
-// Semantic conventions for remote procedure calls.
-const (
- // RPCSystemKey is the attribute Key conforming to the "rpc.system"
- // semantic conventions. It represents a string identifying the remoting
- // system. See below for a list of well-known identifiers.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- RPCSystemKey = attribute.Key("rpc.system")
-
- // RPCServiceKey is the attribute Key conforming to the "rpc.service"
- // semantic conventions. It represents the full (logical) name of the
- // service being called, including its package name, if applicable.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing
- // class. The `code.namespace` attribute may be used to store the latter
- // (despite the attribute name, it may include a class name; e.g., class
- // with method actually executing the call on the server side, RPC client
- // stub class on the client side).
- RPCServiceKey = attribute.Key("rpc.service")
-
- // RPCMethodKey is the attribute Key conforming to the "rpc.method"
- // semantic conventions. It represents the name of the (logical) method
- // being called, must be equal to the $method part in the span name.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the
- // latter (e.g., method actually executing the call on the server side, RPC
- // client stub method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
- // Connect RPC
- RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
- return RPCServiceKey.String(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called, must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
- return RPCMethodKey.String(val)
-}
-
-// Tech-specific attributes for gRPC.
-const (
- // RPCGRPCStatusCodeKey is the attribute Key conforming to the
- // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
- // status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
- // the gRPC request.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
-const (
- // RPCJsonrpcVersionKey is the attribute Key conforming to the
- // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
- // does not specify this, the value can be omitted.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If other than the default
- // version (`1.0`))
- // Stability: stable
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
- // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
- // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
- // property of request or response. Since protocol allows id to be int,
- // string, `null` or missing (for notifications), value is expected to be
- // cast to string for simplicity. Use empty string in case of `null` value.
- // Omit entirely if this is a notification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
- // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_code" semantic conventions. It represents the
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If response is not successful.)
- // Stability: stable
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
- // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_message" semantic conventions. It represents the
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-)
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-// does not specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
- return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of request or response. Since protocol allows id to be int, string,
-// `null` or missing (for notifications), value is expected to be cast to
-// string for simplicity. Use empty string in case of `null` value. Omit
-// entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
- return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
- return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
- return RPCJsonrpcErrorMessageKey.String(val)
-}
-
-// Tech-specific attributes for Connect RPC.
-const (
- // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
- // "rpc.connect_rpc.error_code" semantic conventions. It represents the
- // [error codes](https://connect.build/docs/protocol/#error-codes) of the
- // Connect request. Error codes are always string values.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (If response is not successful
- // and if error code available.)
- // Stability: stable
- RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-)
-
-var (
- // cancelled
- RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
- // unknown
- RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
- // invalid_argument
- RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
- // deadline_exceeded
- RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
- // not_found
- RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
- // already_exists
- RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
- // permission_denied
- RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
- // resource_exhausted
- RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
- // failed_precondition
- RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
- // aborted
- RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
- // out_of_range
- RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
- // unimplemented
- RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
- // internal
- RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
- // unavailable
- RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
- // data_loss
- RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
- // unauthenticated
- RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
deleted file mode 100644
index 0b6cbe960..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.24.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
deleted file mode 100644
index 6e688345c..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
+++ /dev/null
@@ -1,4387 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Describes FaaS attributes.
-const (
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: experimental
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (For some cloud providers, like
- // AWS or GCP, the region in which a function is hosted is essential to
- // uniquely identify the function and also part of its endpoint. Since it's
- // part of the endpoint being called, the region is always known to
- // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
- // If the region is unknown to the client or not required for identifying
- // the invoked function, setting `faas.invoked_region` is optional.)
- // Stability: experimental
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSTriggerKey = attribute.Key("faas.trigger")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
-
-// Attributes for Events represented using Log Records.
-const (
- // EventNameKey is the attribute Key conforming to the "event.name"
- // semantic conventions. It represents the identifies the class / type of
- // event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'browser.mouse.click', 'device.app.lifecycle'
- // Note: Event names are subject to the same rules as [attribute
- // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md).
- // Notably, event names are namespaced to avoid collisions and provide a
- // clean separation of semantics for events in separate domains like
- // browser, mobile, and kubernetes.
- EventNameKey = attribute.Key("event.name")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It represents the identifies the class / type of
-// event.
-func EventName(val string) attribute.KeyValue {
- return EventNameKey.String(val)
-}
-
-// The attributes described in this section are rather generic. They may be
-// used in any Log Record they apply to.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means, that two
- // distinguishable log records MUST have different values.
- // The id MAY be an [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
-
-// Describes Log attributes
-const (
- // LogIostreamKey is the attribute Key conforming to the "log.iostream"
- // semantic conventions. It represents the stream associated with the log.
- // See below for a list of well-known values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- LogIostreamKey = attribute.Key("log.iostream")
-)
-
-var (
- // Logs from stdout stream
- LogIostreamStdout = LogIostreamKey.String("stdout")
- // Events from stderr stream
- LogIostreamStderr = LogIostreamKey.String("stderr")
-)
-
-// A file to which log was emitted.
-const (
- // LogFileNameKey is the attribute Key conforming to the "log.file.name"
- // semantic conventions. It represents the basename of the file.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: experimental
- // Examples: 'audit.log'
- LogFileNameKey = attribute.Key("log.file.name")
-
- // LogFileNameResolvedKey is the attribute Key conforming to the
- // "log.file.name_resolved" semantic conventions. It represents the
- // basename of the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'uuid.log'
- LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
-
- // LogFilePathKey is the attribute Key conforming to the "log.file.path"
- // semantic conventions. It represents the full path to the file.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/var/log/mysql/audit.log'
- LogFilePathKey = attribute.Key("log.file.path")
-
- // LogFilePathResolvedKey is the attribute Key conforming to the
- // "log.file.path_resolved" semantic conventions. It represents the full
- // path to the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/var/lib/docker/uuid.log'
- LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
-)
-
-// LogFileName returns an attribute KeyValue conforming to the
-// "log.file.name" semantic conventions. It represents the basename of the
-// file.
-func LogFileName(val string) attribute.KeyValue {
- return LogFileNameKey.String(val)
-}
-
-// LogFileNameResolved returns an attribute KeyValue conforming to the
-// "log.file.name_resolved" semantic conventions. It represents the basename of
-// the file, with symlinks resolved.
-func LogFileNameResolved(val string) attribute.KeyValue {
- return LogFileNameResolvedKey.String(val)
-}
-
-// LogFilePath returns an attribute KeyValue conforming to the
-// "log.file.path" semantic conventions. It represents the full path to the
-// file.
-func LogFilePath(val string) attribute.KeyValue {
- return LogFilePathKey.String(val)
-}
-
-// LogFilePathResolved returns an attribute KeyValue conforming to the
-// "log.file.path_resolved" semantic conventions. It represents the full path
-// to the file, with symlinks resolved.
-func LogFilePathResolved(val string) attribute.KeyValue {
- return LogFilePathResolvedKey.String(val)
-}
-
-// Describes Database attributes
-const (
- // PoolNameKey is the attribute Key conforming to the "pool.name" semantic
- // conventions. It represents the name of the connection pool; unique
- // within the instrumented application. In case the connection pool
- // implementation doesn't provide a name, then the
- // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
- // should be used
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'myDataSource'
- PoolNameKey = attribute.Key("pool.name")
-
- // StateKey is the attribute Key conforming to the "state" semantic
- // conventions. It represents the state of a connection in the pool
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'idle'
- StateKey = attribute.Key("state")
-)
-
-var (
- // idle
- StateIdle = StateKey.String("idle")
- // used
- StateUsed = StateKey.String("used")
-)
-
-// PoolName returns an attribute KeyValue conforming to the "pool.name"
-// semantic conventions. It represents the name of the connection pool; unique
-// within the instrumented application. In case the connection pool
-// implementation doesn't provide a name, then the
-// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
-// should be used
-func PoolName(val string) attribute.KeyValue {
- return PoolNameKey.String(val)
-}
-
-// ASP.NET Core attributes
-const (
- // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
- // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
- // represents the full type name of the
- // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
- // implementation that handled the exception.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if and only if the exception
- // was handled by this handler.)
- // Stability: experimental
- // Examples: 'Contoso.MyHandler'
- AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
-
- // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
- // the rate limiting policy name.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if the matched endpoint for the
- // request had a rate-limiting policy.)
- // Stability: experimental
- // Examples: 'fixed', 'sliding', 'token'
- AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
-
- // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.result" semantic conventions. It represents
- // the rate-limiting result, shows whether the lease was acquired or
- // contains a rejection reason
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'acquired', 'request_canceled'
- AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
-
- // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
- // "aspnetcore.request.is_unhandled" semantic conventions. It represents
- // the flag indicating if request was handled by the application pipeline.
- //
- // Type: boolean
- // RequirementLevel: ConditionallyRequired (if and only if the request was
- // not handled.)
- // Stability: experimental
- // Examples: True
- AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
-
- // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
- // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
- // value that indicates whether the matched route is a fallback route.
- //
- // Type: boolean
- // RequirementLevel: ConditionallyRequired (If and only if a route was
- // successfully matched.)
- // Stability: experimental
- // Examples: True
- AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
-)
-
-var (
- // Lease was acquired
- AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
- // Lease request was rejected by the endpoint limiter
- AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
- // Lease request was rejected by the global limiter
- AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
- // Lease request was canceled
- AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
-)
-
-// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
-// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
-// represents the full type name of the
-// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
-// implementation that handled the exception.
-func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
- return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
-}
-
-// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
-// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
-// the rate limiting policy name.
-func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
- return AspnetcoreRateLimitingPolicyKey.String(val)
-}
-
-// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
-// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
-// the flag indicating if request was handled by the application pipeline.
-func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
- return AspnetcoreRequestIsUnhandledKey.Bool(val)
-}
-
-// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
-// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
-// value that indicates whether the matched route is a fallback route.
-func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
- return AspnetcoreRoutingIsFallbackKey.Bool(val)
-}
-
-// SignalR attributes
-const (
- // SignalrConnectionStatusKey is the attribute Key conforming to the
- // "signalr.connection.status" semantic conventions. It represents the
- // signalR HTTP connection closure status.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'app_shutdown', 'timeout'
- SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
-
- // SignalrTransportKey is the attribute Key conforming to the
- // "signalr.transport" semantic conventions. It represents the [SignalR
- // transport
- // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'web_sockets', 'long_polling'
- SignalrTransportKey = attribute.Key("signalr.transport")
-)
-
-var (
- // The connection was closed normally
- SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
- // The connection was closed due to a timeout
- SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
- // The connection was closed because the app is shutting down
- SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
-)
-
-var (
- // ServerSentEvents protocol
- SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
- // LongPolling protocol
- SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
- // WebSockets protocol
- SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
-)
-
-// Describes JVM buffer metric attributes.
-const (
- // JvmBufferPoolNameKey is the attribute Key conforming to the
- // "jvm.buffer.pool.name" semantic conventions. It represents the name of
- // the buffer pool.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: experimental
- // Examples: 'mapped', 'direct'
- // Note: Pool names are generally obtained via
- // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
- JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
-)
-
-// JvmBufferPoolName returns an attribute KeyValue conforming to the
-// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
-// buffer pool.
-func JvmBufferPoolName(val string) attribute.KeyValue {
- return JvmBufferPoolNameKey.String(val)
-}
-
-// Describes JVM memory metric attributes.
-const (
- // JvmMemoryPoolNameKey is the attribute Key conforming to the
- // "jvm.memory.pool.name" semantic conventions. It represents the name of
- // the memory pool.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
- // Note: Pool names are generally obtained via
- // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
- JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
-
- // JvmMemoryTypeKey is the attribute Key conforming to the
- // "jvm.memory.type" semantic conventions. It represents the type of
- // memory.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'heap', 'non_heap'
- JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
-)
-
-var (
- // Heap memory
- JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
- // Non-heap memory
- JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
-)
-
-// JvmMemoryPoolName returns an attribute KeyValue conforming to the
-// "jvm.memory.pool.name" semantic conventions. It represents the name of the
-// memory pool.
-func JvmMemoryPoolName(val string) attribute.KeyValue {
- return JvmMemoryPoolNameKey.String(val)
-}
-
-// Describes System metric attributes
-const (
- // SystemDeviceKey is the attribute Key conforming to the "system.device"
- // semantic conventions. It represents the device identifier
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '(identifier)'
- SystemDeviceKey = attribute.Key("system.device")
-)
-
-// SystemDevice returns an attribute KeyValue conforming to the
-// "system.device" semantic conventions. It represents the device identifier
-func SystemDevice(val string) attribute.KeyValue {
- return SystemDeviceKey.String(val)
-}
-
-// Describes System CPU metric attributes
-const (
- // SystemCPULogicalNumberKey is the attribute Key conforming to the
- // "system.cpu.logical_number" semantic conventions. It represents the
- // logical CPU number [0..n-1]
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1
- SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
-
- // SystemCPUStateKey is the attribute Key conforming to the
- // "system.cpu.state" semantic conventions. It represents the state of the
- // CPU
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'idle', 'interrupt'
- SystemCPUStateKey = attribute.Key("system.cpu.state")
-)
-
-var (
- // user
- SystemCPUStateUser = SystemCPUStateKey.String("user")
- // system
- SystemCPUStateSystem = SystemCPUStateKey.String("system")
- // nice
- SystemCPUStateNice = SystemCPUStateKey.String("nice")
- // idle
- SystemCPUStateIdle = SystemCPUStateKey.String("idle")
- // iowait
- SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
- // interrupt
- SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
- // steal
- SystemCPUStateSteal = SystemCPUStateKey.String("steal")
-)
-
-// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
-// "system.cpu.logical_number" semantic conventions. It represents the logical
-// CPU number [0..n-1]
-func SystemCPULogicalNumber(val int) attribute.KeyValue {
- return SystemCPULogicalNumberKey.Int(val)
-}
-
-// Describes System Memory metric attributes
-const (
- // SystemMemoryStateKey is the attribute Key conforming to the
- // "system.memory.state" semantic conventions. It represents the memory
- // state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'free', 'cached'
- SystemMemoryStateKey = attribute.Key("system.memory.state")
-)
-
-var (
- // used
- SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
- // free
- SystemMemoryStateFree = SystemMemoryStateKey.String("free")
- // shared
- SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
- // buffers
- SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
- // cached
- SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
-)
-
-// Describes System Memory Paging metric attributes
-const (
- // SystemPagingDirectionKey is the attribute Key conforming to the
- // "system.paging.direction" semantic conventions. It represents the paging
- // access direction
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'in'
- SystemPagingDirectionKey = attribute.Key("system.paging.direction")
-
- // SystemPagingStateKey is the attribute Key conforming to the
- // "system.paging.state" semantic conventions. It represents the memory
- // paging state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'free'
- SystemPagingStateKey = attribute.Key("system.paging.state")
-
- // SystemPagingTypeKey is the attribute Key conforming to the
- // "system.paging.type" semantic conventions. It represents the memory
- // paging type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'minor'
- SystemPagingTypeKey = attribute.Key("system.paging.type")
-)
-
-var (
- // in
- SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
- // out
- SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
-)
-
-var (
- // used
- SystemPagingStateUsed = SystemPagingStateKey.String("used")
- // free
- SystemPagingStateFree = SystemPagingStateKey.String("free")
-)
-
-var (
- // major
- SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
- // minor
- SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
-)
-
-// Describes Filesystem metric attributes
-const (
- // SystemFilesystemModeKey is the attribute Key conforming to the
- // "system.filesystem.mode" semantic conventions. It represents the
- // filesystem mode
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'rw, ro'
- SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
-
- // SystemFilesystemMountpointKey is the attribute Key conforming to the
- // "system.filesystem.mountpoint" semantic conventions. It represents the
- // filesystem mount path
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/mnt/data'
- SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
-
- // SystemFilesystemStateKey is the attribute Key conforming to the
- // "system.filesystem.state" semantic conventions. It represents the
- // filesystem state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'used'
- SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
-
- // SystemFilesystemTypeKey is the attribute Key conforming to the
- // "system.filesystem.type" semantic conventions. It represents the
- // filesystem type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ext4'
- SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
-)
-
-var (
- // used
- SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
- // free
- SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
- // reserved
- SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
-)
-
-var (
- // fat32
- SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
- // exfat
- SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
- // ntfs
- SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
- // refs
- SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
- // hfsplus
- SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
- // ext4
- SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
-)
-
-// SystemFilesystemMode returns an attribute KeyValue conforming to the
-// "system.filesystem.mode" semantic conventions. It represents the filesystem
-// mode
-func SystemFilesystemMode(val string) attribute.KeyValue {
- return SystemFilesystemModeKey.String(val)
-}
-
-// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
-// the "system.filesystem.mountpoint" semantic conventions. It represents the
-// filesystem mount path
-func SystemFilesystemMountpoint(val string) attribute.KeyValue {
- return SystemFilesystemMountpointKey.String(val)
-}
-
-// Describes Network metric attributes
-const (
- // SystemNetworkStateKey is the attribute Key conforming to the
- // "system.network.state" semantic conventions. It represents a stateless
- // protocol MUST NOT set this attribute
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'close_wait'
- SystemNetworkStateKey = attribute.Key("system.network.state")
-)
-
-var (
- // close
- SystemNetworkStateClose = SystemNetworkStateKey.String("close")
- // close_wait
- SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
- // closing
- SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
- // delete
- SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
- // established
- SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
- // fin_wait_1
- SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
- // fin_wait_2
- SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
- // last_ack
- SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
- // listen
- SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
- // syn_recv
- SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
- // syn_sent
- SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
- // time_wait
- SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
-)
-
-// Describes System Process metric attributes
-const (
- // SystemProcessesStatusKey is the attribute Key conforming to the
- // "system.processes.status" semantic conventions. It represents the
- // process state, e.g., [Linux Process State
- // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'running'
- SystemProcessesStatusKey = attribute.Key("system.processes.status")
-)
-
-var (
- // running
- SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running")
- // sleeping
- SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping")
- // stopped
- SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped")
- // defunct
- SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct")
-)
-
-// These attributes may be used to describe the client in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ClientAddressKey is the attribute Key conforming to the "client.address"
- // semantic conventions. It represents the client address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the server side, and when communicating through
- // an intermediary, `client.address` SHOULD represent the client address
- // behind any intermediaries, for example proxies, if it's available.
- ClientAddressKey = attribute.Key("client.address")
-
- // ClientPortKey is the attribute Key conforming to the "client.port"
- // semantic conventions. It represents the client port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- // Note: When observed from the server side, and when communicating through
- // an intermediary, `client.port` SHOULD represent the client port behind
- // any intermediaries, for example proxies, if it's available.
- ClientPortKey = attribute.Key("client.port")
-)
-
-// ClientAddress returns an attribute KeyValue conforming to the
-// "client.address" semantic conventions. It represents the client address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func ClientAddress(val string) attribute.KeyValue {
- return ClientAddressKey.String(val)
-}
-
-// ClientPort returns an attribute KeyValue conforming to the "client.port"
-// semantic conventions. It represents the client port number.
-func ClientPort(val int) attribute.KeyValue {
- return ClientPortKey.Int(val)
-}
-
-// The attributes used to describe telemetry in the context of databases.
-const (
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents the
- // whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-
- // DBCassandraTableKey is the attribute Key conforming to the
- // "db.cassandra.table" semantic conventions. It represents the name of the
- // primary Cassandra table that the operation is acting upon, including the
- // keyspace name (if applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mytable'
- // Note: This mirrors the db.sql.table attribute but references cassandra
- // rather than sql. It is not recommended to attempt any client-side
- // parsing of `db.statement` just to get this property, but it should be
- // set if it is provided by the library being instrumented. If the
- // operation is acting upon an anonymous table, or more than one table,
- // this value MUST NOT be set.
- DBCassandraTableKey = attribute.Key("db.cassandra.table")
-
- // DBConnectionStringKey is the attribute Key conforming to the
- // "db.connection_string" semantic conventions. It represents the
- // connection string used to connect to the database. It is recommended to
- // remove embedded credentials.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
- DBConnectionStringKey = attribute.Key("db.connection_string")
-
- // DBCosmosDBClientIDKey is the attribute Key conforming to the
- // "db.cosmosdb.client_id" semantic conventions. It represents the unique
- // Cosmos client instance id.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
- DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
-
- // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
- // "db.cosmosdb.connection_mode" semantic conventions. It represents the
- // cosmos client connection mode.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
-
- // DBCosmosDBContainerKey is the attribute Key conforming to the
- // "db.cosmosdb.container" semantic conventions. It represents the cosmos
- // DB container name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'anystring'
- DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
-
- // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
- // "db.cosmosdb.operation_type" semantic conventions. It represents the
- // cosmosDB Operation Type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
-
- // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
- // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
- // consumed for that operation
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 46.18, 1.0
- DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-
- // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
- // "db.cosmosdb.request_content_length" semantic conventions. It represents
- // the request payload size in bytes
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
-
- // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
- // DB status code.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 200, 201
- DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
-
- // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
- // cosmos DB sub status code.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1000, 1002
- DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
-
- // DBElasticsearchClusterNameKey is the attribute Key conforming to the
- // "db.elasticsearch.cluster.name" semantic conventions. It represents the
- // represents the identifier of an Elasticsearch cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
- DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
-
- // DBElasticsearchNodeNameKey is the attribute Key conforming to the
- // "db.elasticsearch.node.name" semantic conventions. It represents the
- // represents the human-readable identifier of the node/instance to which a
- // request was routed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'instance-0000000001'
- DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
-
- // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id"
- // semantic conventions. It represents an identifier (address, unique name,
- // or any other identifier) of the database instance that is executing
- // queries or mutations on the current connection. This is useful in cases
- // where the database is running in a clustered environment and the
- // instrumentation is able to record the node executing the query. The
- // client may obtain this value in databases like MySQL using queries like
- // `select @@hostname`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mysql-e26b99z.example.com'
- DBInstanceIDKey = attribute.Key("db.instance.id")
-
- // DBJDBCDriverClassnameKey is the attribute Key conforming to the
- // "db.jdbc.driver_classname" semantic conventions. It represents the
- // fully-qualified class name of the [Java Database Connectivity
- // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
- // driver used to connect.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'org.postgresql.Driver',
- // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
- DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
-
- // DBMongoDBCollectionKey is the attribute Key conforming to the
- // "db.mongodb.collection" semantic conventions. It represents the MongoDB
- // collection being accessed within the database stated in `db.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'customers', 'products'
- DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
-
- // DBMSSQLInstanceNameKey is the attribute Key conforming to the
- // "db.mssql.instance_name" semantic conventions. It represents the
- // Microsoft SQL Server [instance
- // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
- // connecting to. This name is used to determine the port of a named
- // instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MSSQLSERVER'
- // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
- // required (but still recommended if non-standard).
- DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
-
- // DBNameKey is the attribute Key conforming to the "db.name" semantic
- // conventions. It represents the this attribute is used to report the name
- // of the database being accessed. For commands that switch the database,
- // this should be set to the target database (even if the command fails).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'customers', 'main'
- // Note: In some SQL databases, the database name to be used is called
- // "schema name". In case there are multiple layers that could be
- // considered for database name (e.g. Oracle instance name and schema
- // name), the database name to be used is the more specific layer (e.g.
- // Oracle schema name).
- DBNameKey = attribute.Key("db.name")
-
- // DBOperationKey is the attribute Key conforming to the "db.operation"
- // semantic conventions. It represents the name of the operation being
- // executed, e.g. the [MongoDB command
- // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
- // such as `findAndModify`, or the SQL keyword.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: When setting this to an SQL keyword, it is not recommended to
- // attempt any client-side parsing of `db.statement` just to get this
- // property, but it should be set if the operation name is provided by the
- // library being instrumented. If the SQL statement has an ambiguous
- // operation, or performs more than one operation, this value may be
- // omitted.
- DBOperationKey = attribute.Key("db.operation")
-
- // DBRedisDBIndexKey is the attribute Key conforming to the
- // "db.redis.database_index" semantic conventions. It represents the index
- // of the database being accessed as used in the [`SELECT`
- // command](https://redis.io/commands/select), provided as an integer. To
- // be used instead of the generic `db.name` attribute.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1, 15
- DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
-
- // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
- // semantic conventions. It represents the name of the primary table that
- // the operation is acting upon, including the database name (if
- // applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'public.users', 'customers'
- // Note: It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting
- // upon an anonymous table, or more than one table, this value MUST NOT be
- // set.
- DBSQLTableKey = attribute.Key("db.sql.table")
-
- // DBStatementKey is the attribute Key conforming to the "db.statement"
- // semantic conventions. It represents the database statement being
- // executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
- DBStatementKey = attribute.Key("db.statement")
-
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents an identifier for the database management
- // system (DBMS) product being used. See below for a list of well-known
- // identifiers.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBSystemKey = attribute.Key("db.system")
-
- // DBUserKey is the attribute Key conforming to the "db.user" semantic
- // conventions. It represents the username for accessing the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'readonly_user', 'reporting_user'
- DBUserKey = attribute.Key("db.user")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-var (
- // Gateway (HTTP) connections mode
- DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
- // Direct connection
- DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-var (
- // invalid
- DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
- // create
- DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
- // patch
- DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
- // read
- DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
- // read_feed
- DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
- // delete
- DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
- // replace
- DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
- // execute
- DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
- // query
- DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
- // head
- DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
- // head_feed
- DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
- // upsert
- DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
- // batch
- DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
- // query_plan
- DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
- // execute_javascript
- DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // Microsoft SQL Server Compact
- DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
- // OpenSearch
- DBSystemOpensearch = DBSystemKey.String("opensearch")
- // ClickHouse
- DBSystemClickhouse = DBSystemKey.String("clickhouse")
- // Cloud Spanner
- DBSystemSpanner = DBSystemKey.String("spanner")
- // Trino
- DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// DBCassandraTable returns an attribute KeyValue conforming to the
-// "db.cassandra.table" semantic conventions. It represents the name of the
-// primary Cassandra table that the operation is acting upon, including the
-// keyspace name (if applicable).
-func DBCassandraTable(val string) attribute.KeyValue {
- return DBCassandraTableKey.String(val)
-}
-
-// DBConnectionString returns an attribute KeyValue conforming to the
-// "db.connection_string" semantic conventions. It represents the connection
-// string used to connect to the database. It is recommended to remove embedded
-// credentials.
-func DBConnectionString(val string) attribute.KeyValue {
- return DBConnectionStringKey.String(val)
-}
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
- return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBContainer returns an attribute KeyValue conforming to the
-// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
-// container name.
-func DBCosmosDBContainer(val string) attribute.KeyValue {
- return DBCosmosDBContainerKey.String(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
-// consumed for that operation
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
- return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
- return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// DBElasticsearchClusterName returns an attribute KeyValue conforming to
-// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
-// represents the identifier of an Elasticsearch cluster.
-func DBElasticsearchClusterName(val string) attribute.KeyValue {
- return DBElasticsearchClusterNameKey.String(val)
-}
-
-// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
-// "db.elasticsearch.node.name" semantic conventions. It represents the
-// represents the human-readable identifier of the node/instance to which a
-// request was routed.
-func DBElasticsearchNodeName(val string) attribute.KeyValue {
- return DBElasticsearchNodeNameKey.String(val)
-}
-
-// DBInstanceID returns an attribute KeyValue conforming to the
-// "db.instance.id" semantic conventions. It represents an identifier (address,
-// unique name, or any other identifier) of the database instance that is
-// executing queries or mutations on the current connection. This is useful in
-// cases where the database is running in a clustered environment and the
-// instrumentation is able to record the node executing the query. The client
-// may obtain this value in databases like MySQL using queries like `select
-// @@hostname`.
-func DBInstanceID(val string) attribute.KeyValue {
- return DBInstanceIDKey.String(val)
-}
-
-// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
-// "db.jdbc.driver_classname" semantic conventions. It represents the
-// fully-qualified class name of the [Java Database Connectivity
-// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
-// used to connect.
-func DBJDBCDriverClassname(val string) attribute.KeyValue {
- return DBJDBCDriverClassnameKey.String(val)
-}
-
-// DBMongoDBCollection returns an attribute KeyValue conforming to the
-// "db.mongodb.collection" semantic conventions. It represents the MongoDB
-// collection being accessed within the database stated in `db.name`.
-func DBMongoDBCollection(val string) attribute.KeyValue {
- return DBMongoDBCollectionKey.String(val)
-}
-
-// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
-// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
-// SQL Server [instance
-// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
-// connecting to. This name is used to determine the port of a named instance.
-func DBMSSQLInstanceName(val string) attribute.KeyValue {
- return DBMSSQLInstanceNameKey.String(val)
-}
-
-// DBName returns an attribute KeyValue conforming to the "db.name" semantic
-// conventions. It represents the this attribute is used to report the name of
-// the database being accessed. For commands that switch the database, this
-// should be set to the target database (even if the command fails).
-func DBName(val string) attribute.KeyValue {
- return DBNameKey.String(val)
-}
-
-// DBOperation returns an attribute KeyValue conforming to the
-// "db.operation" semantic conventions. It represents the name of the operation
-// being executed, e.g. the [MongoDB command
-// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
-// such as `findAndModify`, or the SQL keyword.
-func DBOperation(val string) attribute.KeyValue {
- return DBOperationKey.String(val)
-}
-
-// DBRedisDBIndex returns an attribute KeyValue conforming to the
-// "db.redis.database_index" semantic conventions. It represents the index of
-// the database being accessed as used in the [`SELECT`
-// command](https://redis.io/commands/select), provided as an integer. To be
-// used instead of the generic `db.name` attribute.
-func DBRedisDBIndex(val int) attribute.KeyValue {
- return DBRedisDBIndexKey.Int(val)
-}
-
-// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
-// semantic conventions. It represents the name of the primary table that the
-// operation is acting upon, including the database name (if applicable).
-func DBSQLTable(val string) attribute.KeyValue {
- return DBSQLTableKey.String(val)
-}
-
-// DBStatement returns an attribute KeyValue conforming to the
-// "db.statement" semantic conventions. It represents the database statement
-// being executed.
-func DBStatement(val string) attribute.KeyValue {
- return DBStatementKey.String(val)
-}
-
-// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
-// conventions. It represents the username for accessing the database.
-func DBUser(val string) attribute.KeyValue {
- return DBUserKey.String(val)
-}
-
-// Describes deprecated HTTP attributes.
-const (
- // HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
- // semantic conventions.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: deprecated
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorKey = attribute.Key("http.flavor")
-
- // HTTPMethodKey is the attribute Key conforming to the "http.method"
- // semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'GET', 'POST', 'HEAD'
- // Deprecated: use `http.request.method` instead.
- HTTPMethodKey = attribute.Key("http.method")
-
- // HTTPRequestContentLengthKey is the attribute Key conforming to the
- // "http.request_content_length" semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 3495
- // Deprecated: use `http.request.header.content-length` instead.
- HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
-
- // HTTPResponseContentLengthKey is the attribute Key conforming to the
- // "http.response_content_length" semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 3495
- // Deprecated: use `http.response.header.content-length` instead.
- HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
-
- // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
- // semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'http', 'https'
- // Deprecated: use `url.scheme` instead.
- HTTPSchemeKey = attribute.Key("http.scheme")
-
- // HTTPStatusCodeKey is the attribute Key conforming to the
- // "http.status_code" semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 200
- // Deprecated: use `http.response.status_code` instead.
- HTTPStatusCodeKey = attribute.Key("http.status_code")
-
- // HTTPTargetKey is the attribute Key conforming to the "http.target"
- // semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '/search?q=OpenTelemetry#SemConv'
- // Deprecated: use `url.path` and `url.query` instead.
- HTTPTargetKey = attribute.Key("http.target")
-
- // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
- // conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
- // Deprecated: use `url.full` instead.
- HTTPURLKey = attribute.Key("http.url")
-
- // HTTPUserAgentKey is the attribute Key conforming to the
- // "http.user_agent" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
- // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
- // Version/14.1.2 Mobile/15E148 Safari/604.1'
- // Deprecated: use `user_agent.original` instead.
- HTTPUserAgentKey = attribute.Key("http.user_agent")
-)
-
-var (
- // HTTP/1.0
- //
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
- // HTTP/1.1
- //
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
- // HTTP/2
- //
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
- // HTTP/3
- //
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
- // SPDY protocol
- //
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
- // QUIC protocol
- //
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
-)
-
-// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
-// semantic conventions.
-//
-// Deprecated: use `http.request.method` instead.
-func HTTPMethod(val string) attribute.KeyValue {
- return HTTPMethodKey.String(val)
-}
-
-// HTTPRequestContentLength returns an attribute KeyValue conforming to the
-// "http.request_content_length" semantic conventions.
-//
-// Deprecated: use `http.request.header.content-length` instead.
-func HTTPRequestContentLength(val int) attribute.KeyValue {
- return HTTPRequestContentLengthKey.Int(val)
-}
-
-// HTTPResponseContentLength returns an attribute KeyValue conforming to the
-// "http.response_content_length" semantic conventions.
-//
-// Deprecated: use `http.response.header.content-length` instead.
-func HTTPResponseContentLength(val int) attribute.KeyValue {
- return HTTPResponseContentLengthKey.Int(val)
-}
-
-// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
-// semantic conventions.
-//
-// Deprecated: use `url.scheme` instead.
-func HTTPScheme(val string) attribute.KeyValue {
- return HTTPSchemeKey.String(val)
-}
-
-// HTTPStatusCode returns an attribute KeyValue conforming to the
-// "http.status_code" semantic conventions.
-//
-// Deprecated: use `http.response.status_code` instead.
-func HTTPStatusCode(val int) attribute.KeyValue {
- return HTTPStatusCodeKey.Int(val)
-}
-
-// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
-// semantic conventions.
-//
-// Deprecated: use `url.path` and `url.query` instead.
-func HTTPTarget(val string) attribute.KeyValue {
- return HTTPTargetKey.String(val)
-}
-
-// HTTPURL returns an attribute KeyValue conforming to the "http.url"
-// semantic conventions.
-//
-// Deprecated: use `url.full` instead.
-func HTTPURL(val string) attribute.KeyValue {
- return HTTPURLKey.String(val)
-}
-
-// HTTPUserAgent returns an attribute KeyValue conforming to the
-// "http.user_agent" semantic conventions.
-//
-// Deprecated: use `user_agent.original` instead.
-func HTTPUserAgent(val string) attribute.KeyValue {
- return HTTPUserAgentKey.String(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetHostNameKey is the attribute Key conforming to the "net.host.name"
- // semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'example.com'
- // Deprecated: use `server.address`.
- NetHostNameKey = attribute.Key("net.host.name")
-
- // NetHostPortKey is the attribute Key conforming to the "net.host.port"
- // semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 8080
- // Deprecated: use `server.port`.
- NetHostPortKey = attribute.Key("net.host.port")
-
- // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
- // semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'example.com'
- // Deprecated: use `server.address` on client spans and `client.address` on
- // server spans.
- NetPeerNameKey = attribute.Key("net.peer.name")
-
- // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
- // semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 8080
- // Deprecated: use `server.port` on client spans and `client.port` on
- // server spans.
- NetPeerPortKey = attribute.Key("net.peer.port")
-
- // NetProtocolNameKey is the attribute Key conforming to the
- // "net.protocol.name" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'amqp', 'http', 'mqtt'
- // Deprecated: use `network.protocol.name`.
- NetProtocolNameKey = attribute.Key("net.protocol.name")
-
- // NetProtocolVersionKey is the attribute Key conforming to the
- // "net.protocol.version" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '3.1.1'
- // Deprecated: use `network.protocol.version`.
- NetProtocolVersionKey = attribute.Key("net.protocol.version")
-
- // NetSockFamilyKey is the attribute Key conforming to the
- // "net.sock.family" semantic conventions.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: deprecated
- // Deprecated: use `network.transport` and `network.type`.
- NetSockFamilyKey = attribute.Key("net.sock.family")
-
- // NetSockHostAddrKey is the attribute Key conforming to the
- // "net.sock.host.addr" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '/var/my.sock'
- // Deprecated: use `network.local.address`.
- NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
-
- // NetSockHostPortKey is the attribute Key conforming to the
- // "net.sock.host.port" semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 8080
- // Deprecated: use `network.local.port`.
- NetSockHostPortKey = attribute.Key("net.sock.host.port")
-
- // NetSockPeerAddrKey is the attribute Key conforming to the
- // "net.sock.peer.addr" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '192.168.0.1'
- // Deprecated: use `network.peer.address`.
- NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
-
- // NetSockPeerNameKey is the attribute Key conforming to the
- // "net.sock.peer.name" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '/var/my.sock'
- // Deprecated: no replacement at this time.
- NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
-
- // NetSockPeerPortKey is the attribute Key conforming to the
- // "net.sock.peer.port" semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 65531
- // Deprecated: use `network.peer.port`.
- NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
-
- // NetTransportKey is the attribute Key conforming to the "net.transport"
- // semantic conventions.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: deprecated
- // Deprecated: use `network.transport`.
- NetTransportKey = attribute.Key("net.transport")
-)
-
-var (
- // IPv4 address
- //
- // Deprecated: use `network.transport` and `network.type`.
- NetSockFamilyInet = NetSockFamilyKey.String("inet")
- // IPv6 address
- //
- // Deprecated: use `network.transport` and `network.type`.
- NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
- // Unix domain socket path
- //
- // Deprecated: use `network.transport` and `network.type`.
- NetSockFamilyUnix = NetSockFamilyKey.String("unix")
-)
-
-var (
- // ip_tcp
- //
- // Deprecated: use `network.transport`.
- NetTransportTCP = NetTransportKey.String("ip_tcp")
- // ip_udp
- //
- // Deprecated: use `network.transport`.
- NetTransportUDP = NetTransportKey.String("ip_udp")
- // Named or anonymous pipe
- //
- // Deprecated: use `network.transport`.
- NetTransportPipe = NetTransportKey.String("pipe")
- // In-process communication
- //
- // Deprecated: use `network.transport`.
- NetTransportInProc = NetTransportKey.String("inproc")
- // Something else (non IP-based)
- //
- // Deprecated: use `network.transport`.
- NetTransportOther = NetTransportKey.String("other")
-)
-
-// NetHostName returns an attribute KeyValue conforming to the
-// "net.host.name" semantic conventions.
-//
-// Deprecated: use `server.address`.
-func NetHostName(val string) attribute.KeyValue {
- return NetHostNameKey.String(val)
-}
-
-// NetHostPort returns an attribute KeyValue conforming to the
-// "net.host.port" semantic conventions.
-//
-// Deprecated: use `server.port`.
-func NetHostPort(val int) attribute.KeyValue {
- return NetHostPortKey.Int(val)
-}
-
-// NetPeerName returns an attribute KeyValue conforming to the
-// "net.peer.name" semantic conventions.
-//
-// Deprecated: use `server.address` on client spans and `client.address` on
-// server spans.
-func NetPeerName(val string) attribute.KeyValue {
- return NetPeerNameKey.String(val)
-}
-
-// NetPeerPort returns an attribute KeyValue conforming to the
-// "net.peer.port" semantic conventions.
-//
-// Deprecated: use `server.port` on client spans and `client.port` on server
-// spans.
-func NetPeerPort(val int) attribute.KeyValue {
- return NetPeerPortKey.Int(val)
-}
-
-// NetProtocolName returns an attribute KeyValue conforming to the
-// "net.protocol.name" semantic conventions.
-//
-// Deprecated: use `network.protocol.name`.
-func NetProtocolName(val string) attribute.KeyValue {
- return NetProtocolNameKey.String(val)
-}
-
-// NetProtocolVersion returns an attribute KeyValue conforming to the
-// "net.protocol.version" semantic conventions.
-//
-// Deprecated: use `network.protocol.version`.
-func NetProtocolVersion(val string) attribute.KeyValue {
- return NetProtocolVersionKey.String(val)
-}
-
-// NetSockHostAddr returns an attribute KeyValue conforming to the
-// "net.sock.host.addr" semantic conventions.
-//
-// Deprecated: use `network.local.address`.
-func NetSockHostAddr(val string) attribute.KeyValue {
- return NetSockHostAddrKey.String(val)
-}
-
-// NetSockHostPort returns an attribute KeyValue conforming to the
-// "net.sock.host.port" semantic conventions.
-//
-// Deprecated: use `network.local.port`.
-func NetSockHostPort(val int) attribute.KeyValue {
- return NetSockHostPortKey.Int(val)
-}
-
-// NetSockPeerAddr returns an attribute KeyValue conforming to the
-// "net.sock.peer.addr" semantic conventions.
-//
-// Deprecated: use `network.peer.address`.
-func NetSockPeerAddr(val string) attribute.KeyValue {
- return NetSockPeerAddrKey.String(val)
-}
-
-// NetSockPeerName returns an attribute KeyValue conforming to the
-// "net.sock.peer.name" semantic conventions.
-//
-// Deprecated: no replacement at this time.
-func NetSockPeerName(val string) attribute.KeyValue {
- return NetSockPeerNameKey.String(val)
-}
-
-// NetSockPeerPort returns an attribute KeyValue conforming to the
-// "net.sock.peer.port" semantic conventions.
-//
-// Deprecated: use `network.peer.port`.
-func NetSockPeerPort(val int) attribute.KeyValue {
- return NetSockPeerPortKey.Int(val)
-}
-
-// These attributes may be used to describe the receiver of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // DestinationAddressKey is the attribute Key conforming to the
- // "destination.address" semantic conventions. It represents the
- // destination address - domain name if available without reverse DNS
- // lookup; otherwise, IP address or Unix domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the source side, and when communicating through
- // an intermediary, `destination.address` SHOULD represent the destination
- // address behind any intermediaries, for example proxies, if it's
- // available.
- DestinationAddressKey = attribute.Key("destination.address")
-
- // DestinationPortKey is the attribute Key conforming to the
- // "destination.port" semantic conventions. It represents the destination
- // port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- DestinationPortKey = attribute.Key("destination.port")
-)
-
-// DestinationAddress returns an attribute KeyValue conforming to the
-// "destination.address" semantic conventions. It represents the destination
-// address - domain name if available without reverse DNS lookup; otherwise, IP
-// address or Unix domain socket name.
-func DestinationAddress(val string) attribute.KeyValue {
- return DestinationAddressKey.String(val)
-}
-
-// DestinationPort returns an attribute KeyValue conforming to the
-// "destination.port" semantic conventions. It represents the destination port
-// number
-func DestinationPort(val int) attribute.KeyValue {
- return DestinationPortKey.Int(val)
-}
-
-// These attributes may be used for any disk related operation.
-const (
- // DiskIoDirectionKey is the attribute Key conforming to the
- // "disk.io.direction" semantic conventions. It represents the disk IO
- // operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read'
- DiskIoDirectionKey = attribute.Key("disk.io.direction")
-)
-
-var (
- // read
- DiskIoDirectionRead = DiskIoDirectionKey.String("read")
- // write
- DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
-)
-
-// The shared attributes used to report an error.
-const (
- // ErrorTypeKey is the attribute Key conforming to the "error.type"
- // semantic conventions. It represents the describes a class of error the
- // operation ended with.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'timeout', 'java.net.UnknownHostException',
- // 'server_certificate_invalid', '500'
- // Note: The `error.type` SHOULD be predictable and SHOULD have low
- // cardinality.
- // Instrumentations SHOULD document the list of errors they report.
- //
- // The cardinality of `error.type` within one instrumentation library
- // SHOULD be low.
- // Telemetry consumers that aggregate data from multiple instrumentation
- // libraries and applications
- // should be prepared for `error.type` to have high cardinality at query
- // time when no
- // additional filters are applied.
- //
- // If the operation has completed successfully, instrumentations SHOULD NOT
- // set `error.type`.
- //
- // If a specific domain defines its own set of error identifiers (such as
- // HTTP or gRPC status codes),
- // it's RECOMMENDED to:
- //
- // * Use a domain-specific attribute
- // * Set `error.type` to capture all errors, regardless of whether they are
- // defined within the domain-specific set or not.
- ErrorTypeKey = attribute.Key("error.type")
-)
-
-var (
- // A fallback error value to be used when the instrumentation doesn't define a custom value
- ErrorTypeOther = ErrorTypeKey.String("_OTHER")
-)
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionEscapedKey is the attribute Key conforming to the
- // "exception.escaped" semantic conventions. It represents the sHOULD be
- // set to true if the exception event is recorded at a point where it is
- // known that the exception is escaping the scope of the span.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: An exception is considered to have escaped (or left) the scope of
- // a span,
- // if that span is ended while the exception is still logically "in
- // flight".
- // This may be actually "in flight" in some languages (e.g. if the
- // exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most
- // languages.
- //
- // It is usually not possible to determine at the point where an exception
- // is thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending
- // the span,
- // as done in the [example for recording span
- // exceptions](#recording-an-exception).
- //
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It represents the sHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
-// Semantic convention attributes in the HTTP namespace.
-const (
- // HTTPRequestBodySizeKey is the attribute Key conforming to the
- // "http.request.body.size" semantic conventions. It represents the size of
- // the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
- // HTTPRequestMethodKey is the attribute Key conforming to the
- // "http.request.method" semantic conventions. It represents the hTTP
- // request method.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- // Note: HTTP request method value SHOULD be "known" to the
- // instrumentation.
- // By default, this convention defines "known" methods as the ones listed
- // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
- // and the PATCH method defined in
- // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
- //
- // If the HTTP request method is not known to instrumentation, it MUST set
- // the `http.request.method` attribute to `_OTHER`.
- //
- // If the HTTP instrumentation could end up converting valid HTTP request
- // methods to `_OTHER`, then it MUST provide a way to override
- // the list of known HTTP methods. If this override is done via environment
- // variable, then the environment variable MUST be named
- // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
- // list of case-sensitive known HTTP methods
- // (this list MUST be a full override of the default known method, it is
- // not a list of known methods in addition to the defaults).
- //
- // HTTP method names are case-sensitive and `http.request.method` attribute
- // value MUST match a known HTTP method name exactly.
- // Instrumentations for specific web frameworks that consider HTTP methods
- // to be case insensitive, SHOULD populate a canonical equivalent.
- // Tracing instrumentations that do so, MUST also set
- // `http.request.method_original` to the original value.
- HTTPRequestMethodKey = attribute.Key("http.request.method")
-
- // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
- // "http.request.method_original" semantic conventions. It represents the
- // original HTTP method sent by the client in the request line.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GeT', 'ACL', 'foo'
- HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
- // HTTPRequestResendCountKey is the attribute Key conforming to the
- // "http.request.resend_count" semantic conventions. It represents the
- // ordinal number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g. redirection, authorization failure, 503 Server Unavailable,
- // network issues, or any other).
- HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
-
- // HTTPResponseBodySizeKey is the attribute Key conforming to the
- // "http.response.body.size" semantic conventions. It represents the size
- // of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
-
- // HTTPResponseStatusCodeKey is the attribute Key conforming to the
- // "http.response.status_code" semantic conventions. It represents the
- // [HTTP response status
- // code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 200
- HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route, that is, the path
- // template in the format used by the respective server framework.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework as the route attribute should have low-cardinality and
- // the URI path can NOT substitute it.
- // SHOULD include the [application
- // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
- HTTPRouteKey = attribute.Key("http.route")
-)
-
-var (
- // CONNECT method
- HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
- // DELETE method
- HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
- // GET method
- HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
- // HEAD method
- HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
- // OPTIONS method
- HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
- // PATCH method
- HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
- // POST method
- HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
- // PUT method
- HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
- // TRACE method
- HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
- // Any HTTP method that the instrumentation has no prior knowledge of
- HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
-)
-
-// HTTPRequestBodySize returns an attribute KeyValue conforming to the
-// "http.request.body.size" semantic conventions. It represents the size of the
-// request payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestBodySize(val int) attribute.KeyValue {
- return HTTPRequestBodySizeKey.Int(val)
-}
-
-// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
-// "http.request.method_original" semantic conventions. It represents the
-// original HTTP method sent by the client in the request line.
-func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
- return HTTPRequestMethodOriginalKey.String(val)
-}
-
-// HTTPRequestResendCount returns an attribute KeyValue conforming to the
-// "http.request.resend_count" semantic conventions. It represents the ordinal
-// number of request resending attempt (for any reason, including redirects).
-func HTTPRequestResendCount(val int) attribute.KeyValue {
- return HTTPRequestResendCountKey.Int(val)
-}
-
-// HTTPResponseBodySize returns an attribute KeyValue conforming to the
-// "http.response.body.size" semantic conventions. It represents the size of
-// the response payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseBodySize(val int) attribute.KeyValue {
- return HTTPResponseBodySizeKey.Int(val)
-}
-
-// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
-// "http.response.status_code" semantic conventions. It represents the [HTTP
-// response status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPResponseStatusCode(val int) attribute.KeyValue {
- return HTTPResponseStatusCodeKey.Int(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route, that is, the path
-// template in the format used by the respective server framework.
-func HTTPRoute(val string) attribute.KeyValue {
- return HTTPRouteKey.String(val)
-}
-
-// Attributes describing telemetry around messaging systems and messaging
-// activities.
-const (
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message API for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-
- // MessagingClientIDKey is the attribute Key conforming to the
- // "messaging.client_id" semantic conventions. It represents a unique
- // identifier for the client that consumes or produces a message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'client-5', 'myhost@8742@s8083jm'
- MessagingClientIDKey = attribute.Key("messaging.client_id")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. It represents a
- // boolean that is true if the message destination is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-
- // MessagingDestinationNameKey is the attribute Key conforming to the
- // "messaging.destination.name" semantic conventions. It represents the
- // message destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Destination name SHOULD uniquely identify a specific queue, topic
- // or other entity within the broker. If
- // the broker doesn't have such notion, the destination name SHOULD
- // uniquely identify the broker.
- MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
- // MessagingDestinationTemplateKey is the attribute Key conforming to the
- // "messaging.destination.template" semantic conventions. It represents the
- // low cardinality representation of the messaging destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/customers/{customerID}'
- // Note: Destination names could be constructed from templates. An example
- // would be a destination name involving a user name or product id.
- // Although the destination name in this case is of high cardinality, the
- // underlying template is of low cardinality and can be effectively used
- // for grouping and aggregation.
- MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
- // MessagingDestinationTemporaryKey is the attribute Key conforming to the
- // "messaging.destination.temporary" semantic conventions. It represents a
- // boolean that is true if the message destination is temporary and might
- // not exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
- // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
- // to the "messaging.destination_publish.anonymous" semantic conventions.
- // It represents a boolean that is true if the publish message destination
- // is anonymous (could be unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
-
- // MessagingDestinationPublishNameKey is the attribute Key conforming to
- // the "messaging.destination_publish.name" semantic conventions. It
- // represents the name of the original destination the message was
- // published to
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: The name SHOULD uniquely identify a specific queue, topic, or
- // other entity within the broker. If
- // the broker doesn't have such notion, the original destination name
- // SHOULD uniquely identify the broker.
- MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
-
- // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
- // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
- // It represents the ordering key for a given message. If the attribute is
- // not present, the message does not have an ordering key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ordering_key'
- MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
-
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
- // the "messaging.kafka.destination.partition" semantic conventions. It
- // represents the partition the message is sent to.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2
- MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
-
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
- // "messaging.kafka.message.key" semantic conventions. It represents the
- // message keys in Kafka are used for grouping alike messages to ensure
- // they're processed on the same partition. They differ from
- // `messaging.message.id` in that they're not unique. If the key is `null`,
- // the attribute MUST NOT be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- // Note: If the key type is not string, it's string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-
- // MessagingMessageBodySizeKey is the attribute Key conforming to the
- // "messaging.message.body.size" semantic conventions. It represents the
- // size of the message body in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1439
- // Note: This can refer to both the compressed or uncompressed body size.
- // If both sizes are known, the uncompressed
- // body size should be used.
- MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
-
- // MessagingMessageConversationIDKey is the attribute Key conforming to the
- // "messaging.message.conversation_id" semantic conventions. It represents
- // the conversation ID identifying the conversation to which the message
- // belongs, represented as a string. Sometimes called "Correlation ID".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyConversationID'
- MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
- // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
- // "messaging.message.envelope.size" semantic conventions. It represents
- // the size of the message body and metadata in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2738
- // Note: This can refer to both the compressed or uncompressed size. If
- // both sizes are known, the uncompressed
- // size should be used.
- MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
-
- // MessagingMessageIDKey is the attribute Key conforming to the
- // "messaging.message.id" semantic conventions. It represents a value used
- // by the messaging system as an identifier for the message, represented as
- // a string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
- // MessagingOperationKey is the attribute Key conforming to the
- // "messaging.operation" semantic conventions. It represents a string
- // identifying the kind of messaging operation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: If a custom value is used, it MUST be of low cardinality.
- MessagingOperationKey = attribute.Key("messaging.operation")
-
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
- // conventions. It represents the rabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-
- // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
- // conventions. It represents the delay time level for delay message, which
- // determines the message delay time.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3
- MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
- // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delivery_timestamp"
- // semantic conventions. It represents the timestamp in milliseconds that
- // the delay message is expected to be delivered to consumer.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1665987217045
- MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
- // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.group" semantic conventions. It represents
- // the it is essential for FIFO message. Messages that belong to the same
- // message group are always processed one by one within the same consumer
- // group.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myMessageGroup'
- MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
- // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.keys" semantic conventions. It represents
- // the key(s) of message, another way to mark message besides message id.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
- // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.tag" semantic conventions. It represents the
- // secondary classifier of message besides topic.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
- // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.type" semantic conventions. It represents
- // the type of message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
- // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
- // "messaging.rocketmq.namespace" semantic conventions. It represents the
- // namespace of RocketMQ resources, resources in different namespaces are
- // individual.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-
- // MessagingSystemKey is the attribute Key conforming to the
- // "messaging.system" semantic conventions. It represents an identifier for
- // the messaging system being used. See below for a list of well-known
- // identifiers.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingSystemKey = attribute.Key("messaging.system")
-)
-
-var (
- // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
- MessagingOperationPublish = MessagingOperationKey.String("publish")
- // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
- MessagingOperationCreate = MessagingOperationKey.String("create")
- // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
- MessagingOperationReceive = MessagingOperationKey.String("receive")
- // One or more messages are passed to a consumer. This operation refers to push-based scenarios, where consumer register callbacks which get called by messaging SDKs
- MessagingOperationDeliver = MessagingOperationKey.String("deliver")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-var (
- // Apache ActiveMQ
- MessagingSystemActivemq = MessagingSystemKey.String("activemq")
- // Amazon Simple Queue Service (SQS)
- MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
- // Azure Event Grid
- MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid")
- // Azure Event Hubs
- MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs")
- // Azure Service Bus
- MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus")
- // Google Cloud Pub/Sub
- MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
- // Java Message Service
- MessagingSystemJms = MessagingSystemKey.String("jms")
- // Apache Kafka
- MessagingSystemKafka = MessagingSystemKey.String("kafka")
- // RabbitMQ
- MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
- // Apache RocketMQ
- MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
-)
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
- return MessagingBatchMessageCountKey.Int(val)
-}
-
-// MessagingClientID returns an attribute KeyValue conforming to the
-// "messaging.client_id" semantic conventions. It represents a unique
-// identifier for the client that consumes or produces a message.
-func MessagingClientID(val string) attribute.KeyValue {
- return MessagingClientIDKey.String(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name
-func MessagingDestinationName(val string) attribute.KeyValue {
- return MessagingDestinationNameKey.String(val)
-}
-
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
- return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
- return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationPublishAnonymous returns an attribute KeyValue
-// conforming to the "messaging.destination_publish.anonymous" semantic
-// conventions. It represents a boolean that is true if the publish message
-// destination is anonymous (could be unnamed or have auto-generated name).
-func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationPublishAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationPublishName returns an attribute KeyValue conforming
-// to the "messaging.destination_publish.name" semantic conventions. It
-// represents the name of the original destination the message was published to
-func MessagingDestinationPublishName(val string) attribute.KeyValue {
- return MessagingDestinationPublishNameKey.String(val)
-}
-
-// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
-// conventions. It represents the ordering key for a given message. If the
-// attribute is not present, the message does not have an ordering key.
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
- return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
-}
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaDestinationPartition returns an attribute KeyValue
-// conforming to the "messaging.kafka.destination.partition" semantic
-// conventions. It represents the partition the message is sent to.
-func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
- return MessagingKafkaDestinationPartitionKey.Int(val)
-}
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message keys in Kafka are used for grouping alike messages to ensure they're
-// processed on the same partition. They differ from `messaging.message.id` in
-// that they're not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
- return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
- return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
-// MessagingMessageBodySize returns an attribute KeyValue conforming to the
-// "messaging.message.body.size" semantic conventions. It represents the size
-// of the message body in bytes.
-func MessagingMessageBodySize(val int) attribute.KeyValue {
- return MessagingMessageBodySizeKey.Int(val)
-}
-
-// MessagingMessageConversationID returns an attribute KeyValue conforming
-// to the "messaging.message.conversation_id" semantic conventions. It
-// represents the conversation ID identifying the conversation to which the
-// message belongs, represented as a string. Sometimes called "Correlation ID".
-func MessagingMessageConversationID(val string) attribute.KeyValue {
- return MessagingMessageConversationIDKey.String(val)
-}
-
-// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
-// the "messaging.message.envelope.size" semantic conventions. It represents
-// the size of the message body and metadata in bytes.
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
- return MessagingMessageEnvelopeSizeKey.Int(val)
-}
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
- return MessagingMessageIDKey.String(val)
-}
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the rabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
- return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.client_group" semantic conventions. It represents
-// the name of the RocketMQ producer/consumer group that is handling the
-// message. The client type is identified by the SpanKind.
-func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
- return MessagingRocketmqClientGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-// conventions. It represents the delay time level for delay message, which
-// determines the message delay time.
-func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
-}
-
-// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
-// conventions. It represents the timestamp in milliseconds that the delay
-// message is expected to be delivered to consumer.
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
-}
-
-// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.group" semantic conventions. It represents
-// the it is essential for FIFO message. Messages that belong to the same
-// message group are always processed one by one within the same consumer
-// group.
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
- return MessagingRocketmqMessageGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.keys" semantic conventions. It represents
-// the key(s) of message, another way to mark message besides message id.
-func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
- return MessagingRocketmqMessageKeysKey.StringSlice(val)
-}
-
-// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
-// secondary classifier of message besides topic.
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
- return MessagingRocketmqMessageTagKey.String(val)
-}
-
-// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.namespace" semantic conventions. It represents the
-// namespace of RocketMQ resources, resources in different namespaces are
-// individual.
-func MessagingRocketmqNamespace(val string) attribute.KeyValue {
- return MessagingRocketmqNamespaceKey.String(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetworkCarrierIccKey is the attribute Key conforming to the
- // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
- // alpha-2 2-character country code associated with the mobile carrier
- // network.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'DE'
- NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
-
- // NetworkCarrierMccKey is the attribute Key conforming to the
- // "network.carrier.mcc" semantic conventions. It represents the mobile
- // carrier country code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '310'
- NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
-
- // NetworkCarrierMncKey is the attribute Key conforming to the
- // "network.carrier.mnc" semantic conventions. It represents the mobile
- // carrier network code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '001'
- NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
-
- // NetworkCarrierNameKey is the attribute Key conforming to the
- // "network.carrier.name" semantic conventions. It represents the name of
- // the mobile carrier.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'sprint'
- NetworkCarrierNameKey = attribute.Key("network.carrier.name")
-
- // NetworkConnectionSubtypeKey is the attribute Key conforming to the
- // "network.connection.subtype" semantic conventions. It represents the
- // this describes more details regarding the connection.type. It may be the
- // type of cell technology connection, but it could be used for describing
- // details about a wifi connection.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'LTE'
- NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
-
- // NetworkConnectionTypeKey is the attribute Key conforming to the
- // "network.connection.type" semantic conventions. It represents the
- // internet connection type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'wifi'
- NetworkConnectionTypeKey = attribute.Key("network.connection.type")
-
- // NetworkIoDirectionKey is the attribute Key conforming to the
- // "network.io.direction" semantic conventions. It represents the network
- // IO operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'transmit'
- NetworkIoDirectionKey = attribute.Key("network.io.direction")
-
- // NetworkLocalAddressKey is the attribute Key conforming to the
- // "network.local.address" semantic conventions. It represents the local
- // address of the network connection - IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10.1.2.80', '/tmp/my.sock'
- NetworkLocalAddressKey = attribute.Key("network.local.address")
-
- // NetworkLocalPortKey is the attribute Key conforming to the
- // "network.local.port" semantic conventions. It represents the local port
- // number of the network connection.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- NetworkLocalPortKey = attribute.Key("network.local.port")
-
- // NetworkPeerAddressKey is the attribute Key conforming to the
- // "network.peer.address" semantic conventions. It represents the peer
- // address of the network connection - IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10.1.2.80', '/tmp/my.sock'
- NetworkPeerAddressKey = attribute.Key("network.peer.address")
-
- // NetworkPeerPortKey is the attribute Key conforming to the
- // "network.peer.port" semantic conventions. It represents the peer port
- // number of the network connection.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- NetworkPeerPortKey = attribute.Key("network.peer.port")
-
- // NetworkProtocolNameKey is the attribute Key conforming to the
- // "network.protocol.name" semantic conventions. It represents the [OSI
- // application layer](https://osi-model.com/application-layer/) or non-OSI
- // equivalent.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'amqp', 'http', 'mqtt'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkProtocolNameKey = attribute.Key("network.protocol.name")
-
- // NetworkProtocolVersionKey is the attribute Key conforming to the
- // "network.protocol.version" semantic conventions. It represents the
- // version of the protocol specified in `network.protocol.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '3.1.1'
- // Note: `network.protocol.version` refers to the version of the protocol
- // used and might be different from the protocol client's version. If the
- // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`,
- // this attribute should be set to `1.1`.
- NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
-
- // NetworkTransportKey is the attribute Key conforming to the
- // "network.transport" semantic conventions. It represents the [OSI
- // transport layer](https://osi-model.com/transport-layer/) or
- // [inter-process communication
- // method](https://wikipedia.org/wiki/Inter-process_communication).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'tcp', 'udp'
- // Note: The value SHOULD be normalized to lowercase.
- //
- // Consider always setting the transport when setting a port number, since
- // a port number is ambiguous without knowing the transport. For example
- // different processes could be listening on TCP port 12345 and UDP port
- // 12345.
- NetworkTransportKey = attribute.Key("network.transport")
-
- // NetworkTypeKey is the attribute Key conforming to the "network.type"
- // semantic conventions. It represents the [OSI network
- // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ipv4', 'ipv6'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkTypeKey = attribute.Key("network.type")
-)
-
-var (
- // GPRS
- NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
- // EDGE
- NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
- // UMTS
- NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
- // CDMA
- NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
- // IDEN
- NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
- // EHRPD
- NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
- // GSM
- NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
-)
-
-var (
- // wifi
- NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
- // wired
- NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
- // cell
- NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
- // unavailable
- NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
- // unknown
- NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
-)
-
-var (
- // transmit
- NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
- // receive
- NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
-)
-
-var (
- // TCP
- NetworkTransportTCP = NetworkTransportKey.String("tcp")
- // UDP
- NetworkTransportUDP = NetworkTransportKey.String("udp")
- // Named or anonymous pipe
- NetworkTransportPipe = NetworkTransportKey.String("pipe")
- // Unix domain socket
- NetworkTransportUnix = NetworkTransportKey.String("unix")
-)
-
-var (
- // IPv4
- NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
- // IPv6
- NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
-)
-
-// NetworkCarrierIcc returns an attribute KeyValue conforming to the
-// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetworkCarrierIcc(val string) attribute.KeyValue {
- return NetworkCarrierIccKey.String(val)
-}
-
-// NetworkCarrierMcc returns an attribute KeyValue conforming to the
-// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
-// country code.
-func NetworkCarrierMcc(val string) attribute.KeyValue {
- return NetworkCarrierMccKey.String(val)
-}
-
-// NetworkCarrierMnc returns an attribute KeyValue conforming to the
-// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
-// network code.
-func NetworkCarrierMnc(val string) attribute.KeyValue {
- return NetworkCarrierMncKey.String(val)
-}
-
-// NetworkCarrierName returns an attribute KeyValue conforming to the
-// "network.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetworkCarrierName(val string) attribute.KeyValue {
- return NetworkCarrierNameKey.String(val)
-}
-
-// NetworkLocalAddress returns an attribute KeyValue conforming to the
-// "network.local.address" semantic conventions. It represents the local
-// address of the network connection - IP address or Unix domain socket name.
-func NetworkLocalAddress(val string) attribute.KeyValue {
- return NetworkLocalAddressKey.String(val)
-}
-
-// NetworkLocalPort returns an attribute KeyValue conforming to the
-// "network.local.port" semantic conventions. It represents the local port
-// number of the network connection.
-func NetworkLocalPort(val int) attribute.KeyValue {
- return NetworkLocalPortKey.Int(val)
-}
-
-// NetworkPeerAddress returns an attribute KeyValue conforming to the
-// "network.peer.address" semantic conventions. It represents the peer address
-// of the network connection - IP address or Unix domain socket name.
-func NetworkPeerAddress(val string) attribute.KeyValue {
- return NetworkPeerAddressKey.String(val)
-}
-
-// NetworkPeerPort returns an attribute KeyValue conforming to the
-// "network.peer.port" semantic conventions. It represents the peer port number
-// of the network connection.
-func NetworkPeerPort(val int) attribute.KeyValue {
- return NetworkPeerPortKey.Int(val)
-}
-
-// NetworkProtocolName returns an attribute KeyValue conforming to the
-// "network.protocol.name" semantic conventions. It represents the [OSI
-// application layer](https://osi-model.com/application-layer/) or non-OSI
-// equivalent.
-func NetworkProtocolName(val string) attribute.KeyValue {
- return NetworkProtocolNameKey.String(val)
-}
-
-// NetworkProtocolVersion returns an attribute KeyValue conforming to the
-// "network.protocol.version" semantic conventions. It represents the version
-// of the protocol specified in `network.protocol.name`.
-func NetworkProtocolVersion(val string) attribute.KeyValue {
- return NetworkProtocolVersionKey.String(val)
-}
-
-// Attributes for remote procedure calls.
-const (
- // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
- // "rpc.connect_rpc.error_code" semantic conventions. It represents the
- // [error codes](https://connect.build/docs/protocol/#error-codes) of the
- // Connect request. Error codes are always string values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-
- // RPCGRPCStatusCodeKey is the attribute Key conforming to the
- // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
- // status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
- // the gRPC request.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-
- // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_code" semantic conventions. It represents the
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
- // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_message" semantic conventions. It represents the
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-
- // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
- // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
- // property of request or response. Since protocol allows id to be int,
- // string, `null` or missing (for notifications), value is expected to be
- // cast to string for simplicity. Use empty string in case of `null` value.
- // Omit entirely if this is a notification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
- // RPCJsonrpcVersionKey is the attribute Key conforming to the
- // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
- // doesn't specify this, the value can be omitted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
- // RPCMethodKey is the attribute Key conforming to the "rpc.method"
- // semantic conventions. It represents the name of the (logical) method
- // being called, must be equal to the $method part in the span name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the
- // latter (e.g., method actually executing the call on the server side, RPC
- // client stub method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-
- // RPCServiceKey is the attribute Key conforming to the "rpc.service"
- // semantic conventions. It represents the full (logical) name of the
- // service being called, including its package name, if applicable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing
- // class. The `code.namespace` attribute may be used to store the latter
- // (despite the attribute name, it may include a class name; e.g., class
- // with method actually executing the call on the server side, RPC client
- // stub class on the client side).
- RPCServiceKey = attribute.Key("rpc.service")
-
- // RPCSystemKey is the attribute Key conforming to the "rpc.system"
- // semantic conventions. It represents a string identifying the remoting
- // system. See below for a list of well-known identifiers.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCSystemKey = attribute.Key("rpc.system")
-)
-
-var (
- // cancelled
- RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
- // unknown
- RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
- // invalid_argument
- RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
- // deadline_exceeded
- RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
- // not_found
- RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
- // already_exists
- RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
- // permission_denied
- RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
- // resource_exhausted
- RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
- // failed_precondition
- RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
- // aborted
- RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
- // out_of_range
- RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
- // unimplemented
- RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
- // internal
- RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
- // unavailable
- RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
- // data_loss
- RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
- // unauthenticated
- RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
- // Connect RPC
- RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
- return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
- return RPCJsonrpcErrorMessageKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of request or response. Since protocol allows id to be int, string,
-// `null` or missing (for notifications), value is expected to be cast to
-// string for simplicity. Use empty string in case of `null` value. Omit
-// entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
- return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-// doesn't specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
- return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called, must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
- return RPCMethodKey.String(val)
-}
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
- return RPCServiceKey.String(val)
-}
-
-// These attributes may be used to describe the server in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ServerAddressKey is the attribute Key conforming to the "server.address"
- // semantic conventions. It represents the server domain name if available
- // without reverse DNS lookup; otherwise, IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.address` SHOULD represent the server address
- // behind any intermediaries, for example proxies, if it's available.
- ServerAddressKey = attribute.Key("server.address")
-
- // ServerPortKey is the attribute Key conforming to the "server.port"
- // semantic conventions. It represents the server port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 80, 8080, 443
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.port` SHOULD represent the server port behind
- // any intermediaries, for example proxies, if it's available.
- ServerPortKey = attribute.Key("server.port")
-)
-
-// ServerAddress returns an attribute KeyValue conforming to the
-// "server.address" semantic conventions. It represents the server domain name
-// if available without reverse DNS lookup; otherwise, IP address or Unix
-// domain socket name.
-func ServerAddress(val string) attribute.KeyValue {
- return ServerAddressKey.String(val)
-}
-
-// ServerPort returns an attribute KeyValue conforming to the "server.port"
-// semantic conventions. It represents the server port number.
-func ServerPort(val int) attribute.KeyValue {
- return ServerPortKey.Int(val)
-}
-
-// These attributes may be used to describe the sender of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // SourceAddressKey is the attribute Key conforming to the "source.address"
- // semantic conventions. It represents the source address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the destination side, and when communicating
- // through an intermediary, `source.address` SHOULD represent the source
- // address behind any intermediaries, for example proxies, if it's
- // available.
- SourceAddressKey = attribute.Key("source.address")
-
- // SourcePortKey is the attribute Key conforming to the "source.port"
- // semantic conventions. It represents the source port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- SourcePortKey = attribute.Key("source.port")
-)
-
-// SourceAddress returns an attribute KeyValue conforming to the
-// "source.address" semantic conventions. It represents the source address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func SourceAddress(val string) attribute.KeyValue {
- return SourceAddressKey.String(val)
-}
-
-// SourcePort returns an attribute KeyValue conforming to the "source.port"
-// semantic conventions. It represents the source port number
-func SourcePort(val int) attribute.KeyValue {
- return SourcePortKey.Int(val)
-}
-
-// Semantic convention attributes in the TLS namespace.
-const (
- // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
- // semantic conventions. It represents the string indicating the
- // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
- // used during the current connection.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
- // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
- // Note: The values allowed for `tls.cipher` MUST be one of the
- // `Descriptions` of the [registered TLS Cipher
- // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
- TLSCipherKey = attribute.Key("tls.cipher")
-
- // TLSClientCertificateKey is the attribute Key conforming to the
- // "tls.client.certificate" semantic conventions. It represents the
- // pEM-encoded stand-alone certificate offered by the client. This is
- // usually mutually-exclusive of `client.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSClientCertificateKey = attribute.Key("tls.client.certificate")
-
- // TLSClientCertificateChainKey is the attribute Key conforming to the
- // "tls.client.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the client. This is usually mutually-exclusive of
- // `client.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
-
- // TLSClientHashMd5Key is the attribute Key conforming to the
- // "tls.client.hash.md5" semantic conventions. It represents the
- // certificate fingerprint using the MD5 digest of DER-encoded version of
- // certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
-
- // TLSClientHashSha1Key is the attribute Key conforming to the
- // "tls.client.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of DER-encoded version of
- // certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
-
- // TLSClientHashSha256Key is the attribute Key conforming to the
- // "tls.client.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of DER-encoded version
- // of certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
-
- // TLSClientIssuerKey is the attribute Key conforming to the
- // "tls.client.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSClientIssuerKey = attribute.Key("tls.client.issuer")
-
- // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
- // semantic conventions. It represents a hash that identifies clients based
- // on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSClientJa3Key = attribute.Key("tls.client.ja3")
-
- // TLSClientNotAfterKey is the attribute Key conforming to the
- // "tls.client.not_after" semantic conventions. It represents the date/Time
- // indicating when client certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
-
- // TLSClientNotBeforeKey is the attribute Key conforming to the
- // "tls.client.not_before" semantic conventions. It represents the
- // date/Time indicating when client certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
-
- // TLSClientServerNameKey is the attribute Key conforming to the
- // "tls.client.server_name" semantic conventions. It represents the also
- // called an SNI, this tells the server which hostname to which the client
- // is attempting to connect to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry.io'
- TLSClientServerNameKey = attribute.Key("tls.client.server_name")
-
- // TLSClientSubjectKey is the attribute Key conforming to the
- // "tls.client.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
- TLSClientSubjectKey = attribute.Key("tls.client.subject")
-
- // TLSClientSupportedCiphersKey is the attribute Key conforming to the
- // "tls.client.supported_ciphers" semantic conventions. It represents the
- // array of ciphers offered by the client during the client hello.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
- TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
-
- // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
- // conventions. It represents the string indicating the curve used for the
- // given cipher, when applicable
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'secp256r1'
- TLSCurveKey = attribute.Key("tls.curve")
-
- // TLSEstablishedKey is the attribute Key conforming to the
- // "tls.established" semantic conventions. It represents the boolean flag
- // indicating if the TLS negotiation was successful and transitioned to an
- // encrypted tunnel.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSEstablishedKey = attribute.Key("tls.established")
-
- // TLSNextProtocolKey is the attribute Key conforming to the
- // "tls.next_protocol" semantic conventions. It represents the string
- // indicating the protocol being tunneled. Per the values in the [IANA
- // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
- // this string should be lower case.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'http/1.1'
- TLSNextProtocolKey = attribute.Key("tls.next_protocol")
-
- // TLSProtocolNameKey is the attribute Key conforming to the
- // "tls.protocol.name" semantic conventions. It represents the normalized
- // lowercase protocol name parsed from original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- TLSProtocolNameKey = attribute.Key("tls.protocol.name")
-
- // TLSProtocolVersionKey is the attribute Key conforming to the
- // "tls.protocol.version" semantic conventions. It represents the numeric
- // part of the version parsed from the original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2', '3'
- TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
-
- // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
- // semantic conventions. It represents the boolean flag indicating if this
- // TLS connection was resumed from an existing TLS negotiation.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSResumedKey = attribute.Key("tls.resumed")
-
- // TLSServerCertificateKey is the attribute Key conforming to the
- // "tls.server.certificate" semantic conventions. It represents the
- // pEM-encoded stand-alone certificate offered by the server. This is
- // usually mutually-exclusive of `server.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSServerCertificateKey = attribute.Key("tls.server.certificate")
-
- // TLSServerCertificateChainKey is the attribute Key conforming to the
- // "tls.server.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the server. This is usually mutually-exclusive of
- // `server.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
-
- // TLSServerHashMd5Key is the attribute Key conforming to the
- // "tls.server.hash.md5" semantic conventions. It represents the
- // certificate fingerprint using the MD5 digest of DER-encoded version of
- // certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
-
- // TLSServerHashSha1Key is the attribute Key conforming to the
- // "tls.server.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of DER-encoded version of
- // certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
-
- // TLSServerHashSha256Key is the attribute Key conforming to the
- // "tls.server.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of DER-encoded version
- // of certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
-
- // TLSServerIssuerKey is the attribute Key conforming to the
- // "tls.server.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSServerIssuerKey = attribute.Key("tls.server.issuer")
-
- // TLSServerJa3sKey is the attribute Key conforming to the
- // "tls.server.ja3s" semantic conventions. It represents a hash that
- // identifies servers based on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
-
- // TLSServerNotAfterKey is the attribute Key conforming to the
- // "tls.server.not_after" semantic conventions. It represents the date/Time
- // indicating when server certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
-
- // TLSServerNotBeforeKey is the attribute Key conforming to the
- // "tls.server.not_before" semantic conventions. It represents the
- // date/Time indicating when server certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
-
- // TLSServerSubjectKey is the attribute Key conforming to the
- // "tls.server.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // server.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
- TLSServerSubjectKey = attribute.Key("tls.server.subject")
-)
-
-var (
- // ssl
- TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
- // tls
- TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
-)
-
-// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
-// semantic conventions. It represents the string indicating the
-// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
-// during the current connection.
-func TLSCipher(val string) attribute.KeyValue {
- return TLSCipherKey.String(val)
-}
-
-// TLSClientCertificate returns an attribute KeyValue conforming to the
-// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the client. This is usually
-// mutually-exclusive of `client.certificate_chain` since this value also
-// exists in that list.
-func TLSClientCertificate(val string) attribute.KeyValue {
- return TLSClientCertificateKey.String(val)
-}
-
-// TLSClientCertificateChain returns an attribute KeyValue conforming to the
-// "tls.client.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the client. This is usually mutually-exclusive of `client.certificate` since
-// that value should be the first certificate in the chain.
-func TLSClientCertificateChain(val ...string) attribute.KeyValue {
- return TLSClientCertificateChainKey.StringSlice(val)
-}
-
-// TLSClientHashMd5 returns an attribute KeyValue conforming to the
-// "tls.client.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashMd5(val string) attribute.KeyValue {
- return TLSClientHashMd5Key.String(val)
-}
-
-// TLSClientHashSha1 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha1(val string) attribute.KeyValue {
- return TLSClientHashSha1Key.String(val)
-}
-
-// TLSClientHashSha256 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha256(val string) attribute.KeyValue {
- return TLSClientHashSha256Key.String(val)
-}
-
-// TLSClientIssuer returns an attribute KeyValue conforming to the
-// "tls.client.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the client.
-func TLSClientIssuer(val string) attribute.KeyValue {
- return TLSClientIssuerKey.String(val)
-}
-
-// TLSClientJa3 returns an attribute KeyValue conforming to the
-// "tls.client.ja3" semantic conventions. It represents a hash that identifies
-// clients based on how they perform an SSL/TLS handshake.
-func TLSClientJa3(val string) attribute.KeyValue {
- return TLSClientJa3Key.String(val)
-}
-
-// TLSClientNotAfter returns an attribute KeyValue conforming to the
-// "tls.client.not_after" semantic conventions. It represents the date/Time
-// indicating when client certificate is no longer considered valid.
-func TLSClientNotAfter(val string) attribute.KeyValue {
- return TLSClientNotAfterKey.String(val)
-}
-
-// TLSClientNotBefore returns an attribute KeyValue conforming to the
-// "tls.client.not_before" semantic conventions. It represents the date/Time
-// indicating when client certificate is first considered valid.
-func TLSClientNotBefore(val string) attribute.KeyValue {
- return TLSClientNotBeforeKey.String(val)
-}
-
-// TLSClientServerName returns an attribute KeyValue conforming to the
-// "tls.client.server_name" semantic conventions. It represents the also called
-// an SNI, this tells the server which hostname to which the client is
-// attempting to connect to.
-func TLSClientServerName(val string) attribute.KeyValue {
- return TLSClientServerNameKey.String(val)
-}
-
-// TLSClientSubject returns an attribute KeyValue conforming to the
-// "tls.client.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the client.
-func TLSClientSubject(val string) attribute.KeyValue {
- return TLSClientSubjectKey.String(val)
-}
-
-// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
-// "tls.client.supported_ciphers" semantic conventions. It represents the array
-// of ciphers offered by the client during the client hello.
-func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
- return TLSClientSupportedCiphersKey.StringSlice(val)
-}
-
-// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
-// semantic conventions. It represents the string indicating the curve used for
-// the given cipher, when applicable
-func TLSCurve(val string) attribute.KeyValue {
- return TLSCurveKey.String(val)
-}
-
-// TLSEstablished returns an attribute KeyValue conforming to the
-// "tls.established" semantic conventions. It represents the boolean flag
-// indicating if the TLS negotiation was successful and transitioned to an
-// encrypted tunnel.
-func TLSEstablished(val bool) attribute.KeyValue {
- return TLSEstablishedKey.Bool(val)
-}
-
-// TLSNextProtocol returns an attribute KeyValue conforming to the
-// "tls.next_protocol" semantic conventions. It represents the string
-// indicating the protocol being tunneled. Per the values in the [IANA
-// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
-// this string should be lower case.
-func TLSNextProtocol(val string) attribute.KeyValue {
- return TLSNextProtocolKey.String(val)
-}
-
-// TLSProtocolVersion returns an attribute KeyValue conforming to the
-// "tls.protocol.version" semantic conventions. It represents the numeric part
-// of the version parsed from the original string of the negotiated [SSL/TLS
-// protocol
-// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
-func TLSProtocolVersion(val string) attribute.KeyValue {
- return TLSProtocolVersionKey.String(val)
-}
-
-// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
-// semantic conventions. It represents the boolean flag indicating if this TLS
-// connection was resumed from an existing TLS negotiation.
-func TLSResumed(val bool) attribute.KeyValue {
- return TLSResumedKey.Bool(val)
-}
-
-// TLSServerCertificate returns an attribute KeyValue conforming to the
-// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the server. This is usually
-// mutually-exclusive of `server.certificate_chain` since this value also
-// exists in that list.
-func TLSServerCertificate(val string) attribute.KeyValue {
- return TLSServerCertificateKey.String(val)
-}
-
-// TLSServerCertificateChain returns an attribute KeyValue conforming to the
-// "tls.server.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the server. This is usually mutually-exclusive of `server.certificate` since
-// that value should be the first certificate in the chain.
-func TLSServerCertificateChain(val ...string) attribute.KeyValue {
- return TLSServerCertificateChainKey.StringSlice(val)
-}
-
-// TLSServerHashMd5 returns an attribute KeyValue conforming to the
-// "tls.server.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashMd5(val string) attribute.KeyValue {
- return TLSServerHashMd5Key.String(val)
-}
-
-// TLSServerHashSha1 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashSha1(val string) attribute.KeyValue {
- return TLSServerHashSha1Key.String(val)
-}
-
-// TLSServerHashSha256 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashSha256(val string) attribute.KeyValue {
- return TLSServerHashSha256Key.String(val)
-}
-
-// TLSServerIssuer returns an attribute KeyValue conforming to the
-// "tls.server.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the client.
-func TLSServerIssuer(val string) attribute.KeyValue {
- return TLSServerIssuerKey.String(val)
-}
-
-// TLSServerJa3s returns an attribute KeyValue conforming to the
-// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
-// servers based on how they perform an SSL/TLS handshake.
-func TLSServerJa3s(val string) attribute.KeyValue {
- return TLSServerJa3sKey.String(val)
-}
-
-// TLSServerNotAfter returns an attribute KeyValue conforming to the
-// "tls.server.not_after" semantic conventions. It represents the date/Time
-// indicating when server certificate is no longer considered valid.
-func TLSServerNotAfter(val string) attribute.KeyValue {
- return TLSServerNotAfterKey.String(val)
-}
-
-// TLSServerNotBefore returns an attribute KeyValue conforming to the
-// "tls.server.not_before" semantic conventions. It represents the date/Time
-// indicating when server certificate is first considered valid.
-func TLSServerNotBefore(val string) attribute.KeyValue {
- return TLSServerNotBeforeKey.String(val)
-}
-
-// TLSServerSubject returns an attribute KeyValue conforming to the
-// "tls.server.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the server.
-func TLSServerSubject(val string) attribute.KeyValue {
- return TLSServerSubjectKey.String(val)
-}
-
-// Attributes describing URL.
-const (
- // URLFragmentKey is the attribute Key conforming to the "url.fragment"
- // semantic conventions. It represents the [URI
- // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'SemConv'
- URLFragmentKey = attribute.Key("url.fragment")
-
- // URLFullKey is the attribute Key conforming to the "url.full" semantic
- // conventions. It represents the absolute URL describing a network
- // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // '//localhost'
- // Note: For network calls, URL usually has
- // `scheme://host[:port][path][?query][#fragment]` format, where the
- // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
- // included nevertheless.
- // `url.full` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case username and
- // password SHOULD be redacted and attribute's value SHOULD be
- // `https://REDACTED:REDACTED@www.example.com/`.
- // `url.full` SHOULD capture the absolute URL when it is available (or can
- // be reconstructed) and SHOULD NOT be validated or modified except for
- // sanitizing purposes.
- URLFullKey = attribute.Key("url.full")
-
- // URLPathKey is the attribute Key conforming to the "url.path" semantic
- // conventions. It represents the [URI
- // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/search'
- URLPathKey = attribute.Key("url.path")
-
- // URLQueryKey is the attribute Key conforming to the "url.query" semantic
- // conventions. It represents the [URI
- // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'q=OpenTelemetry'
- // Note: Sensitive content provided in query string SHOULD be scrubbed when
- // instrumentations can identify it.
- URLQueryKey = attribute.Key("url.query")
-
- // URLSchemeKey is the attribute Key conforming to the "url.scheme"
- // semantic conventions. It represents the [URI
- // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
- // identifying the used protocol.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https', 'ftp', 'telnet'
- URLSchemeKey = attribute.Key("url.scheme")
-)
-
-// URLFragment returns an attribute KeyValue conforming to the
-// "url.fragment" semantic conventions. It represents the [URI
-// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
-func URLFragment(val string) attribute.KeyValue {
- return URLFragmentKey.String(val)
-}
-
-// URLFull returns an attribute KeyValue conforming to the "url.full"
-// semantic conventions. It represents the absolute URL describing a network
-// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
-func URLFull(val string) attribute.KeyValue {
- return URLFullKey.String(val)
-}
-
-// URLPath returns an attribute KeyValue conforming to the "url.path"
-// semantic conventions. It represents the [URI
-// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
-func URLPath(val string) attribute.KeyValue {
- return URLPathKey.String(val)
-}
-
-// URLQuery returns an attribute KeyValue conforming to the "url.query"
-// semantic conventions. It represents the [URI
-// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
-func URLQuery(val string) attribute.KeyValue {
- return URLQueryKey.String(val)
-}
-
-// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
-// semantic conventions. It represents the [URI
-// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
-// identifying the used protocol.
-func URLScheme(val string) attribute.KeyValue {
- return URLSchemeKey.String(val)
-}
-
-// Describes user-agent attributes.
-const (
- // UserAgentOriginalKey is the attribute Key conforming to the
- // "user_agent.original" semantic conventions. It represents the value of
- // the [HTTP
- // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
- // header sent by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
- // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
- // Version/14.1.2 Mobile/15E148 Safari/604.1'
- UserAgentOriginalKey = attribute.Key("user_agent.original")
-)
-
-// UserAgentOriginal returns an attribute KeyValue conforming to the
-// "user_agent.original" semantic conventions. It represents the value of the
-// [HTTP
-// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-// header sent by the client.
-func UserAgentOriginal(val string) attribute.KeyValue {
- return UserAgentOriginalKey.String(val)
-}
-
-// Session is defined as the period of time encompassing all activities
-// performed by the application and the actions executed by the end user.
-// Consequently, a Session is represented as a collection of Logs, Events, and
-// Spans emitted by the Client Application throughout the Session's duration.
-// Each Session is assigned a unique identifier, which is included as an
-// attribute in the Logs, Events, and Spans generated during the Session's
-// lifecycle.
-// When a session reaches end of life, typically due to user inactivity or
-// session timeout, a new session identifier will be assigned. The previous
-// session identifier may be provided by the instrumentation so that telemetry
-// backends can link the two sessions.
-const (
- // SessionIDKey is the attribute Key conforming to the "session.id"
- // semantic conventions. It represents a unique id to identify a session.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionIDKey = attribute.Key("session.id")
-
- // SessionPreviousIDKey is the attribute Key conforming to the
- // "session.previous_id" semantic conventions. It represents the previous
- // `session.id` for this user, when known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionPreviousIDKey = attribute.Key("session.previous_id")
-)
-
-// SessionID returns an attribute KeyValue conforming to the "session.id"
-// semantic conventions. It represents a unique id to identify a session.
-func SessionID(val string) attribute.KeyValue {
- return SessionIDKey.String(val)
-}
-
-// SessionPreviousID returns an attribute KeyValue conforming to the
-// "session.previous_id" semantic conventions. It represents the previous
-// `session.id` for this user, when known.
-func SessionPreviousID(val string) attribute.KeyValue {
- return SessionPreviousIDKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
deleted file mode 100644
index d27e8a8f8..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconv implements OpenTelemetry semantic conventions.
-//
-// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the v1.24.0
-// version of the OpenTelemetry semantic conventions.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
deleted file mode 100644
index 6c019aafc..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// This event represents an occurrence of a lifecycle transition on the iOS
-// platform.
-const (
- // IosStateKey is the attribute Key conforming to the "ios.state" semantic
- // conventions. It represents the this attribute represents the state the
- // application has transitioned into at the occurrence of the event.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: experimental
- // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
- // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
- // and from which the `OS terminology` column values are derived.
- IosStateKey = attribute.Key("ios.state")
-)
-
-var (
- // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
- IosStateActive = IosStateKey.String("active")
- // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
- IosStateInactive = IosStateKey.String("inactive")
- // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
- IosStateBackground = IosStateKey.String("background")
- // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
- IosStateForeground = IosStateKey.String("foreground")
- // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
- IosStateTerminate = IosStateKey.String("terminate")
-)
-
-// This event represents an occurrence of a lifecycle transition on the Android
-// platform.
-const (
- // AndroidStateKey is the attribute Key conforming to the "android.state"
- // semantic conventions. It represents the this attribute represents the
- // state the application has transitioned into at the occurrence of the
- // event.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: experimental
- // Note: The Android lifecycle states are defined in [Activity lifecycle
- // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
- // and from which the `OS identifiers` are derived.
- AndroidStateKey = attribute.Key("android.state")
-)
-
-var (
- // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
- AndroidStateCreated = AndroidStateKey.String("created")
- // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
- AndroidStateBackground = AndroidStateKey.String("background")
- // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
- AndroidStateForeground = AndroidStateKey.String("foreground")
-)
-
-// This semantic convention defines the attributes used to represent a feature
-// flag evaluation as an event.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: experimental
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
- // "feature_flag.variant" semantic conventions. It represents the sHOULD be
- // a semantic identifier for a value. If one is unavailable, a stringified
- // version of the value can be used.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: experimental
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
- // For example, the variant `red` maybe be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
-// semantic identifier for a value. If one is unavailable, a stringified
-// version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
-// RPC received/sent message.
-const (
- // MessageCompressedSizeKey is the attribute Key conforming to the
- // "message.compressed_size" semantic conventions. It represents the
- // compressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- MessageCompressedSizeKey = attribute.Key("message.compressed_size")
-
- // MessageIDKey is the attribute Key conforming to the "message.id"
- // semantic conventions. It represents the mUST be calculated as two
- // different counters starting from `1` one for sent messages and one for
- // received message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- MessageIDKey = attribute.Key("message.id")
-
- // MessageTypeKey is the attribute Key conforming to the "message.type"
- // semantic conventions. It represents the whether this is a received or
- // sent message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessageTypeKey = attribute.Key("message.type")
-
- // MessageUncompressedSizeKey is the attribute Key conforming to the
- // "message.uncompressed_size" semantic conventions. It represents the
- // uncompressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
-)
-
-var (
- // sent
- MessageTypeSent = MessageTypeKey.String("SENT")
- // received
- MessageTypeReceived = MessageTypeKey.String("RECEIVED")
-)
-
-// MessageCompressedSize returns an attribute KeyValue conforming to the
-// "message.compressed_size" semantic conventions. It represents the compressed
-// size of the message in bytes.
-func MessageCompressedSize(val int) attribute.KeyValue {
- return MessageCompressedSizeKey.Int(val)
-}
-
-// MessageID returns an attribute KeyValue conforming to the "message.id"
-// semantic conventions. It represents the mUST be calculated as two different
-// counters starting from `1` one for sent messages and one for received
-// message.
-func MessageID(val int) attribute.KeyValue {
- return MessageIDKey.Int(val)
-}
-
-// MessageUncompressedSize returns an attribute KeyValue conforming to the
-// "message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func MessageUncompressedSize(val int) attribute.KeyValue {
- return MessageUncompressedSizeKey.Int(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
deleted file mode 100644
index 7235bb51d..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-const (
- // ExceptionEventName is the name of the Span event representing an exception.
- ExceptionEventName = "exception"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
deleted file mode 100644
index a6b953f62..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
+++ /dev/null
@@ -1,1071 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-const (
-
- // DBClientConnectionsUsage is the metric conforming to the
- // "db.client.connections.usage" semantic conventions. It represents the number
- // of connections that are currently in state described by the `state`
- // attribute.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsUsageName = "db.client.connections.usage"
- DBClientConnectionsUsageUnit = "{connection}"
- DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute"
-
- // DBClientConnectionsIdleMax is the metric conforming to the
- // "db.client.connections.idle.max" semantic conventions. It represents the
- // maximum number of idle open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
- DBClientConnectionsIdleMaxUnit = "{connection}"
- DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed"
-
- // DBClientConnectionsIdleMin is the metric conforming to the
- // "db.client.connections.idle.min" semantic conventions. It represents the
- // minimum number of idle open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
- DBClientConnectionsIdleMinUnit = "{connection}"
- DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed"
-
- // DBClientConnectionsMax is the metric conforming to the
- // "db.client.connections.max" semantic conventions. It represents the maximum
- // number of open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsMaxName = "db.client.connections.max"
- DBClientConnectionsMaxUnit = "{connection}"
- DBClientConnectionsMaxDescription = "The maximum number of open connections allowed"
-
- // DBClientConnectionsPendingRequests is the metric conforming to the
- // "db.client.connections.pending_requests" semantic conventions. It represents
- // the number of pending requests for an open connection, cumulative for the
- // entire pool.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
- DBClientConnectionsPendingRequestsUnit = "{request}"
- DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
-
- // DBClientConnectionsTimeouts is the metric conforming to the
- // "db.client.connections.timeouts" semantic conventions. It represents the
- // number of connection timeouts that have occurred trying to obtain a
- // connection from the pool.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
- DBClientConnectionsTimeoutsUnit = "{timeout}"
- DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
-
- // DBClientConnectionsCreateTime is the metric conforming to the
- // "db.client.connections.create_time" semantic conventions. It represents the
- // time it took to create a new connection.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
- DBClientConnectionsCreateTimeUnit = "ms"
- DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection"
-
- // DBClientConnectionsWaitTime is the metric conforming to the
- // "db.client.connections.wait_time" semantic conventions. It represents the
- // time it took to obtain an open connection from the pool.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
- DBClientConnectionsWaitTimeUnit = "ms"
- DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool"
-
- // DBClientConnectionsUseTime is the metric conforming to the
- // "db.client.connections.use_time" semantic conventions. It represents the
- // time between borrowing a connection and returning it to the pool.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsUseTimeName = "db.client.connections.use_time"
- DBClientConnectionsUseTimeUnit = "ms"
- DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
-
- // AspnetcoreRoutingMatchAttempts is the metric conforming to the
- // "aspnetcore.routing.match_attempts" semantic conventions. It represents the
- // number of requests that were attempted to be matched to an endpoint.
- // Instrument: counter
- // Unit: {match_attempt}
- // Stability: Experimental
- AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
- AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
- AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
-
- // AspnetcoreDiagnosticsExceptions is the metric conforming to the
- // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
- // number of exceptions caught by exception handling middleware.
- // Instrument: counter
- // Unit: {exception}
- // Stability: Experimental
- AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
- AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
- AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
-
- // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
- // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
- // represents the number of requests that are currently active on the server
- // that hold a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
- AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
- AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
-
- // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
- // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
- // represents the duration of rate limiting lease held by requests on the
- // server.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
- AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
- AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
-
- // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
- // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
- // represents the time the request spent in a queue waiting to acquire a rate
- // limiting lease.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
- AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
- AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
- // represents the number of requests that are currently queued, waiting to
- // acquire a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
- AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
- AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
- // number of requests that tried to acquire a rate limiting lease.
- // Instrument: counter
- // Unit: {request}
- // Stability: Experimental
- AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
- AspnetcoreRateLimitingRequestsUnit = "{request}"
- AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
-
- // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
- // semantic conventions. It represents the measures the time taken to perform a
- // DNS lookup.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DNSLookupDurationName = "dns.lookup.duration"
- DNSLookupDurationUnit = "s"
- DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
-
- // HTTPClientOpenConnections is the metric conforming to the
- // "http.client.open_connections" semantic conventions. It represents the
- // number of outbound HTTP connections that are currently active or idle on the
- // client.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- HTTPClientOpenConnectionsName = "http.client.open_connections"
- HTTPClientOpenConnectionsUnit = "{connection}"
- HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
-
- // HTTPClientConnectionDuration is the metric conforming to the
- // "http.client.connection.duration" semantic conventions. It represents the
- // duration of the successfully established outbound HTTP connections.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- HTTPClientConnectionDurationName = "http.client.connection.duration"
- HTTPClientConnectionDurationUnit = "s"
- HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
-
- // HTTPClientActiveRequests is the metric conforming to the
- // "http.client.active_requests" semantic conventions. It represents the number
- // of active HTTP requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPClientActiveRequestsName = "http.client.active_requests"
- HTTPClientActiveRequestsUnit = "{request}"
- HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
-
- // HTTPClientRequestTimeInQueue is the metric conforming to the
- // "http.client.request.time_in_queue" semantic conventions. It represents the
- // amount of time requests spent on a queue waiting for an available
- // connection.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue"
- HTTPClientRequestTimeInQueueUnit = "s"
- HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection."
-
- // KestrelActiveConnections is the metric conforming to the
- // "kestrel.active_connections" semantic conventions. It represents the number
- // of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- KestrelActiveConnectionsName = "kestrel.active_connections"
- KestrelActiveConnectionsUnit = "{connection}"
- KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // KestrelConnectionDuration is the metric conforming to the
- // "kestrel.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- KestrelConnectionDurationName = "kestrel.connection.duration"
- KestrelConnectionDurationUnit = "s"
- KestrelConnectionDurationDescription = "The duration of connections on the server."
-
- // KestrelRejectedConnections is the metric conforming to the
- // "kestrel.rejected_connections" semantic conventions. It represents the
- // number of connections rejected by the server.
- // Instrument: counter
- // Unit: {connection}
- // Stability: Experimental
- KestrelRejectedConnectionsName = "kestrel.rejected_connections"
- KestrelRejectedConnectionsUnit = "{connection}"
- KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
-
- // KestrelQueuedConnections is the metric conforming to the
- // "kestrel.queued_connections" semantic conventions. It represents the number
- // of connections that are currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- KestrelQueuedConnectionsName = "kestrel.queued_connections"
- KestrelQueuedConnectionsUnit = "{connection}"
- KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
-
- // KestrelQueuedRequests is the metric conforming to the
- // "kestrel.queued_requests" semantic conventions. It represents the number of
- // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
- // currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- KestrelQueuedRequestsName = "kestrel.queued_requests"
- KestrelQueuedRequestsUnit = "{request}"
- KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
-
- // KestrelUpgradedConnections is the metric conforming to the
- // "kestrel.upgraded_connections" semantic conventions. It represents the
- // number of connections that are currently upgraded (WebSockets). .
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
- KestrelUpgradedConnectionsUnit = "{connection}"
- KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ."
-
- // KestrelTLSHandshakeDuration is the metric conforming to the
- // "kestrel.tls_handshake.duration" semantic conventions. It represents the
- // duration of TLS handshakes on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
- KestrelTLSHandshakeDurationUnit = "s"
- KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
-
- // KestrelActiveTLSHandshakes is the metric conforming to the
- // "kestrel.active_tls_handshakes" semantic conventions. It represents the
- // number of TLS handshakes that are currently in progress on the server.
- // Instrument: updowncounter
- // Unit: {handshake}
- // Stability: Experimental
- KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
- KestrelActiveTLSHandshakesUnit = "{handshake}"
- KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
-
- // SignalrServerConnectionDuration is the metric conforming to the
- // "signalr.server.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- SignalrServerConnectionDurationName = "signalr.server.connection.duration"
- SignalrServerConnectionDurationUnit = "s"
- SignalrServerConnectionDurationDescription = "The duration of connections on the server."
-
- // SignalrServerActiveConnections is the metric conforming to the
- // "signalr.server.active_connections" semantic conventions. It represents the
- // number of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- SignalrServerActiveConnectionsName = "signalr.server.active_connections"
- SignalrServerActiveConnectionsUnit = "{connection}"
- SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
- // semantic conventions. It represents the measures the duration of the
- // function's logic execution.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInvokeDurationName = "faas.invoke_duration"
- FaaSInvokeDurationUnit = "s"
- FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
-
- // FaaSInitDuration is the metric conforming to the "faas.init_duration"
- // semantic conventions. It represents the measures the duration of the
- // function's initialization, such as a cold start.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInitDurationName = "faas.init_duration"
- FaaSInitDurationUnit = "s"
- FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
-
- // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
- // conventions. It represents the number of invocation cold starts.
- // Instrument: counter
- // Unit: {coldstart}
- // Stability: Experimental
- FaaSColdstartsName = "faas.coldstarts"
- FaaSColdstartsUnit = "{coldstart}"
- FaaSColdstartsDescription = "Number of invocation cold starts"
-
- // FaaSErrors is the metric conforming to the "faas.errors" semantic
- // conventions. It represents the number of invocation errors.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- FaaSErrorsName = "faas.errors"
- FaaSErrorsUnit = "{error}"
- FaaSErrorsDescription = "Number of invocation errors"
-
- // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
- // conventions. It represents the number of successful invocations.
- // Instrument: counter
- // Unit: {invocation}
- // Stability: Experimental
- FaaSInvocationsName = "faas.invocations"
- FaaSInvocationsUnit = "{invocation}"
- FaaSInvocationsDescription = "Number of successful invocations"
-
- // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
- // conventions. It represents the number of invocation timeouts.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- FaaSTimeoutsName = "faas.timeouts"
- FaaSTimeoutsUnit = "{timeout}"
- FaaSTimeoutsDescription = "Number of invocation timeouts"
-
- // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
- // conventions. It represents the distribution of max memory usage per
- // invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSMemUsageName = "faas.mem_usage"
- FaaSMemUsageUnit = "By"
- FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
-
- // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
- // conventions. It represents the distribution of CPU usage per invocation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSCPUUsageName = "faas.cpu_usage"
- FaaSCPUUsageUnit = "s"
- FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
-
- // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
- // conventions. It represents the distribution of net I/O usage per invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSNetIoName = "faas.net_io"
- FaaSNetIoUnit = "By"
- FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
-
- // HTTPServerRequestDuration is the metric conforming to the
- // "http.server.request.duration" semantic conventions. It represents the
- // duration of HTTP server requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPServerRequestDurationName = "http.server.request.duration"
- HTTPServerRequestDurationUnit = "s"
- HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
-
- // HTTPServerActiveRequests is the metric conforming to the
- // "http.server.active_requests" semantic conventions. It represents the number
- // of active HTTP server requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPServerActiveRequestsName = "http.server.active_requests"
- HTTPServerActiveRequestsUnit = "{request}"
- HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
-
- // HTTPServerRequestBodySize is the metric conforming to the
- // "http.server.request.body.size" semantic conventions. It represents the size
- // of HTTP server request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerRequestBodySizeName = "http.server.request.body.size"
- HTTPServerRequestBodySizeUnit = "By"
- HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
-
- // HTTPServerResponseBodySize is the metric conforming to the
- // "http.server.response.body.size" semantic conventions. It represents the
- // size of HTTP server response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerResponseBodySizeName = "http.server.response.body.size"
- HTTPServerResponseBodySizeUnit = "By"
- HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
-
- // HTTPClientRequestDuration is the metric conforming to the
- // "http.client.request.duration" semantic conventions. It represents the
- // duration of HTTP client requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPClientRequestDurationName = "http.client.request.duration"
- HTTPClientRequestDurationUnit = "s"
- HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
-
- // HTTPClientRequestBodySize is the metric conforming to the
- // "http.client.request.body.size" semantic conventions. It represents the size
- // of HTTP client request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientRequestBodySizeName = "http.client.request.body.size"
- HTTPClientRequestBodySizeUnit = "By"
- HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
-
- // HTTPClientResponseBodySize is the metric conforming to the
- // "http.client.response.body.size" semantic conventions. It represents the
- // size of HTTP client response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientResponseBodySizeName = "http.client.response.body.size"
- HTTPClientResponseBodySizeUnit = "By"
- HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
-
- // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
- // conventions. It represents the measure of initial memory requested.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmMemoryInitName = "jvm.memory.init"
- JvmMemoryInitUnit = "By"
- JvmMemoryInitDescription = "Measure of initial memory requested."
-
- // JvmSystemCPUUtilization is the metric conforming to the
- // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
- // CPU utilization for the whole system as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
- JvmSystemCPUUtilizationUnit = "1"
- JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
-
- // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
- // semantic conventions. It represents the average CPU load of the whole system
- // for the last minute as reported by the JVM.
- // Instrument: gauge
- // Unit: {run_queue_item}
- // Stability: Experimental
- JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
- JvmSystemCPULoad1mUnit = "{run_queue_item}"
- JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
-
- // JvmBufferMemoryUsage is the metric conforming to the
- // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
- // memory used by buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
- JvmBufferMemoryUsageUnit = "By"
- JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
-
- // JvmBufferMemoryLimit is the metric conforming to the
- // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
- // total memory capacity of buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
- JvmBufferMemoryLimitUnit = "By"
- JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
-
- // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
- // conventions. It represents the number of buffers in the pool.
- // Instrument: updowncounter
- // Unit: {buffer}
- // Stability: Experimental
- JvmBufferCountName = "jvm.buffer.count"
- JvmBufferCountUnit = "{buffer}"
- JvmBufferCountDescription = "Number of buffers in the pool."
-
- // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
- // conventions. It represents the measure of memory used.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedName = "jvm.memory.used"
- JvmMemoryUsedUnit = "By"
- JvmMemoryUsedDescription = "Measure of memory used."
-
- // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
- // semantic conventions. It represents the measure of memory committed.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryCommittedName = "jvm.memory.committed"
- JvmMemoryCommittedUnit = "By"
- JvmMemoryCommittedDescription = "Measure of memory committed."
-
- // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
- // conventions. It represents the measure of max obtainable memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryLimitName = "jvm.memory.limit"
- JvmMemoryLimitUnit = "By"
- JvmMemoryLimitDescription = "Measure of max obtainable memory."
-
- // JvmMemoryUsedAfterLastGc is the metric conforming to the
- // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
- // measure of memory used, as measured after the most recent garbage collection
- // event on this pool.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
- JvmMemoryUsedAfterLastGcUnit = "By"
- JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
-
- // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
- // conventions. It represents the duration of JVM garbage collection actions.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- JvmGcDurationName = "jvm.gc.duration"
- JvmGcDurationUnit = "s"
- JvmGcDurationDescription = "Duration of JVM garbage collection actions."
-
- // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
- // conventions. It represents the number of executing platform threads.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Stable
- JvmThreadCountName = "jvm.thread.count"
- JvmThreadCountUnit = "{thread}"
- JvmThreadCountDescription = "Number of executing platform threads."
-
- // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
- // conventions. It represents the number of classes loaded since JVM start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassLoadedName = "jvm.class.loaded"
- JvmClassLoadedUnit = "{class}"
- JvmClassLoadedDescription = "Number of classes loaded since JVM start."
-
- // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
- // semantic conventions. It represents the number of classes unloaded since JVM
- // start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassUnloadedName = "jvm.class.unloaded"
- JvmClassUnloadedUnit = "{class}"
- JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
-
- // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
- // conventions. It represents the number of classes currently loaded.
- // Instrument: updowncounter
- // Unit: {class}
- // Stability: Stable
- JvmClassCountName = "jvm.class.count"
- JvmClassCountUnit = "{class}"
- JvmClassCountDescription = "Number of classes currently loaded."
-
- // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
- // conventions. It represents the number of processors available to the Java
- // virtual machine.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Stable
- JvmCPUCountName = "jvm.cpu.count"
- JvmCPUCountUnit = "{cpu}"
- JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
-
- // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
- // conventions. It represents the cPU time used by the process as reported by
- // the JVM.
- // Instrument: counter
- // Unit: s
- // Stability: Stable
- JvmCPUTimeName = "jvm.cpu.time"
- JvmCPUTimeUnit = "s"
- JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
-
- // JvmCPURecentUtilization is the metric conforming to the
- // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
- // CPU utilization for the process as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Stable
- JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
- JvmCPURecentUtilizationUnit = "1"
- JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
-
- // MessagingPublishDuration is the metric conforming to the
- // "messaging.publish.duration" semantic conventions. It represents the
- // measures the duration of publish operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingPublishDurationName = "messaging.publish.duration"
- MessagingPublishDurationUnit = "s"
- MessagingPublishDurationDescription = "Measures the duration of publish operation."
-
- // MessagingReceiveDuration is the metric conforming to the
- // "messaging.receive.duration" semantic conventions. It represents the
- // measures the duration of receive operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingReceiveDurationName = "messaging.receive.duration"
- MessagingReceiveDurationUnit = "s"
- MessagingReceiveDurationDescription = "Measures the duration of receive operation."
-
- // MessagingDeliverDuration is the metric conforming to the
- // "messaging.deliver.duration" semantic conventions. It represents the
- // measures the duration of deliver operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingDeliverDurationName = "messaging.deliver.duration"
- MessagingDeliverDurationUnit = "s"
- MessagingDeliverDurationDescription = "Measures the duration of deliver operation."
-
- // MessagingPublishMessages is the metric conforming to the
- // "messaging.publish.messages" semantic conventions. It represents the
- // measures the number of published messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingPublishMessagesName = "messaging.publish.messages"
- MessagingPublishMessagesUnit = "{message}"
- MessagingPublishMessagesDescription = "Measures the number of published messages."
-
- // MessagingReceiveMessages is the metric conforming to the
- // "messaging.receive.messages" semantic conventions. It represents the
- // measures the number of received messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingReceiveMessagesName = "messaging.receive.messages"
- MessagingReceiveMessagesUnit = "{message}"
- MessagingReceiveMessagesDescription = "Measures the number of received messages."
-
- // MessagingDeliverMessages is the metric conforming to the
- // "messaging.deliver.messages" semantic conventions. It represents the
- // measures the number of delivered messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingDeliverMessagesName = "messaging.deliver.messages"
- MessagingDeliverMessagesUnit = "{message}"
- MessagingDeliverMessagesDescription = "Measures the number of delivered messages."
-
- // RPCServerDuration is the metric conforming to the "rpc.server.duration"
- // semantic conventions. It represents the measures the duration of inbound
- // RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCServerDurationName = "rpc.server.duration"
- RPCServerDurationUnit = "ms"
- RPCServerDurationDescription = "Measures the duration of inbound RPC."
-
- // RPCServerRequestSize is the metric conforming to the
- // "rpc.server.request.size" semantic conventions. It represents the measures
- // the size of RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerRequestSizeName = "rpc.server.request.size"
- RPCServerRequestSizeUnit = "By"
- RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCServerResponseSize is the metric conforming to the
- // "rpc.server.response.size" semantic conventions. It represents the measures
- // the size of RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerResponseSizeName = "rpc.server.response.size"
- RPCServerResponseSizeUnit = "By"
- RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCServerRequestsPerRPC is the metric conforming to the
- // "rpc.server.requests_per_rpc" semantic conventions. It represents the
- // measures the number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
- RPCServerRequestsPerRPCUnit = "{count}"
- RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCServerResponsesPerRPC is the metric conforming to the
- // "rpc.server.responses_per_rpc" semantic conventions. It represents the
- // measures the number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
- RPCServerResponsesPerRPCUnit = "{count}"
- RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // RPCClientDuration is the metric conforming to the "rpc.client.duration"
- // semantic conventions. It represents the measures the duration of outbound
- // RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCClientDurationName = "rpc.client.duration"
- RPCClientDurationUnit = "ms"
- RPCClientDurationDescription = "Measures the duration of outbound RPC."
-
- // RPCClientRequestSize is the metric conforming to the
- // "rpc.client.request.size" semantic conventions. It represents the measures
- // the size of RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientRequestSizeName = "rpc.client.request.size"
- RPCClientRequestSizeUnit = "By"
- RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCClientResponseSize is the metric conforming to the
- // "rpc.client.response.size" semantic conventions. It represents the measures
- // the size of RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientResponseSizeName = "rpc.client.response.size"
- RPCClientResponseSizeUnit = "By"
- RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCClientRequestsPerRPC is the metric conforming to the
- // "rpc.client.requests_per_rpc" semantic conventions. It represents the
- // measures the number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
- RPCClientRequestsPerRPCUnit = "{count}"
- RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCClientResponsesPerRPC is the metric conforming to the
- // "rpc.client.responses_per_rpc" semantic conventions. It represents the
- // measures the number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
- RPCClientResponsesPerRPCUnit = "{count}"
- RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
- // conventions. It represents the seconds each logical CPU spent on each mode.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemCPUTimeName = "system.cpu.time"
- SystemCPUTimeUnit = "s"
- SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
-
- // SystemCPUUtilization is the metric conforming to the
- // "system.cpu.utilization" semantic conventions. It represents the difference
- // in system.cpu.time since the last measurement, divided by the elapsed time
- // and number of logical CPUs.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- SystemCPUUtilizationName = "system.cpu.utilization"
- SystemCPUUtilizationUnit = "1"
- SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
-
- // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
- // semantic conventions. It represents the reports the current frequency of the
- // CPU in Hz.
- // Instrument: gauge
- // Unit: {Hz}
- // Stability: Experimental
- SystemCPUFrequencyName = "system.cpu.frequency"
- SystemCPUFrequencyUnit = "{Hz}"
- SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
-
- // SystemCPUPhysicalCount is the metric conforming to the
- // "system.cpu.physical.count" semantic conventions. It represents the reports
- // the number of actual physical processor cores on the hardware.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPUPhysicalCountName = "system.cpu.physical.count"
- SystemCPUPhysicalCountUnit = "{cpu}"
- SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
-
- // SystemCPULogicalCount is the metric conforming to the
- // "system.cpu.logical.count" semantic conventions. It represents the reports
- // the number of logical (virtual) processor cores created by the operating
- // system to manage multitasking.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPULogicalCountName = "system.cpu.logical.count"
- SystemCPULogicalCountUnit = "{cpu}"
- SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
-
- // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
- // semantic conventions. It represents the reports memory in use by state.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryUsageName = "system.memory.usage"
- SystemMemoryUsageUnit = "By"
- SystemMemoryUsageDescription = "Reports memory in use by state."
-
- // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
- // semantic conventions. It represents the total memory available in the
- // system.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryLimitName = "system.memory.limit"
- SystemMemoryLimitUnit = "By"
- SystemMemoryLimitDescription = "Total memory available in the system."
-
- // SystemMemoryUtilization is the metric conforming to the
- // "system.memory.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemMemoryUtilizationName = "system.memory.utilization"
- SystemMemoryUtilizationUnit = "1"
-
- // SystemPagingUsage is the metric conforming to the "system.paging.usage"
- // semantic conventions. It represents the unix swap or windows pagefile usage.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemPagingUsageName = "system.paging.usage"
- SystemPagingUsageUnit = "By"
- SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
-
- // SystemPagingUtilization is the metric conforming to the
- // "system.paging.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingUtilizationName = "system.paging.utilization"
- SystemPagingUtilizationUnit = "1"
-
- // SystemPagingFaults is the metric conforming to the "system.paging.faults"
- // semantic conventions.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingFaultsName = "system.paging.faults"
- SystemPagingFaultsUnit = "{fault}"
-
- // SystemPagingOperations is the metric conforming to the
- // "system.paging.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingOperationsName = "system.paging.operations"
- SystemPagingOperationsUnit = "{operation}"
-
- // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskIoName = "system.disk.io"
- SystemDiskIoUnit = "By"
-
- // SystemDiskOperations is the metric conforming to the
- // "system.disk.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskOperationsName = "system.disk.operations"
- SystemDiskOperationsUnit = "{operation}"
-
- // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
- // semantic conventions. It represents the time disk spent activated.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskIoTimeName = "system.disk.io_time"
- SystemDiskIoTimeUnit = "s"
- SystemDiskIoTimeDescription = "Time disk spent activated"
-
- // SystemDiskOperationTime is the metric conforming to the
- // "system.disk.operation_time" semantic conventions. It represents the sum of
- // the time each operation took to complete.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskOperationTimeName = "system.disk.operation_time"
- SystemDiskOperationTimeUnit = "s"
- SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
-
- // SystemDiskMerged is the metric conforming to the "system.disk.merged"
- // semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskMergedName = "system.disk.merged"
- SystemDiskMergedUnit = "{operation}"
-
- // SystemFilesystemUsage is the metric conforming to the
- // "system.filesystem.usage" semantic conventions.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUsageName = "system.filesystem.usage"
- SystemFilesystemUsageUnit = "By"
-
- // SystemFilesystemUtilization is the metric conforming to the
- // "system.filesystem.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUtilizationName = "system.filesystem.utilization"
- SystemFilesystemUtilizationUnit = "1"
-
- // SystemNetworkDropped is the metric conforming to the
- // "system.network.dropped" semantic conventions. It represents the count of
- // packets that are dropped or discarded even though there was no error.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- SystemNetworkDroppedName = "system.network.dropped"
- SystemNetworkDroppedUnit = "{packet}"
- SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
-
- // SystemNetworkPackets is the metric conforming to the
- // "system.network.packets" semantic conventions.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkPacketsName = "system.network.packets"
- SystemNetworkPacketsUnit = "{packet}"
-
- // SystemNetworkErrors is the metric conforming to the "system.network.errors"
- // semantic conventions. It represents the count of network errors detected.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- SystemNetworkErrorsName = "system.network.errors"
- SystemNetworkErrorsUnit = "{error}"
- SystemNetworkErrorsDescription = "Count of network errors detected"
-
- // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkIoName = "system.network.io"
- SystemNetworkIoUnit = "By"
-
- // SystemNetworkConnections is the metric conforming to the
- // "system.network.connections" semantic conventions.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkConnectionsName = "system.network.connections"
- SystemNetworkConnectionsUnit = "{connection}"
-
- // SystemProcessesCount is the metric conforming to the
- // "system.processes.count" semantic conventions. It represents the total
- // number of processes in each state.
- // Instrument: updowncounter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessesCountName = "system.processes.count"
- SystemProcessesCountUnit = "{process}"
- SystemProcessesCountDescription = "Total number of processes in each state"
-
- // SystemProcessesCreated is the metric conforming to the
- // "system.processes.created" semantic conventions. It represents the total
- // number of processes created over uptime of the host.
- // Instrument: counter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessesCreatedName = "system.processes.created"
- SystemProcessesCreatedUnit = "{process}"
- SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host"
-
- // SystemLinuxMemoryAvailable is the metric conforming to the
- // "system.linux.memory.available" semantic conventions. It represents an
- // estimate of how much memory is available for starting new applications,
- // without causing swapping.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemLinuxMemoryAvailableName = "system.linux.memory.available"
- SystemLinuxMemoryAvailableUnit = "By"
- SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
deleted file mode 100644
index d66bbe9c2..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
+++ /dev/null
@@ -1,2545 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// A cloud environment (e.g. GCP, Azure, AWS).
-const (
- // CloudAccountIDKey is the attribute Key conforming to the
- // "cloud.account.id" semantic conventions. It represents the cloud account
- // ID the resource is assigned to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
-
- // CloudAvailabilityZoneKey is the attribute Key conforming to the
- // "cloud.availability_zone" semantic conventions. It represents the cloud
- // regions often have multiple, isolated locations known as zones to
- // increase availability. Availability zone represents the zone where the
- // resource is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google
- // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
- // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
- // semantic conventions. It represents the cloud platform in use.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-
- // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
- // semantic conventions. It represents the name of the cloud provider.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- CloudProviderKey = attribute.Key("cloud.provider")
-
- // CloudRegionKey is the attribute Key conforming to the "cloud.region"
- // semantic conventions. It represents the geographical region the resource
- // is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for
- // example [Alibaba Cloud
- // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
- // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
- // [Azure
- // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
- // [Google Cloud regions](https://cloud.google.com/about/locations), or
- // [Tencent Cloud
- // regions](https://www.tencentcloud.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
-
- // CloudResourceIDKey is the attribute Key conforming to the
- // "cloud.resource_id" semantic conventions. It represents the cloud
- // provider-specific native identifier of the monitored cloud resource
- // (e.g. an
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // on AWS, a [fully qualified resource
- // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
- // on Azure, a [full resource
- // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
- // on GCP)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
- // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
- // '/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
- // Note: On some cloud providers, it may not be possible to determine the
- // full ID at startup,
- // so it may be necessary to set `cloud.resource_id` as a span attribute
- // instead.
- //
- // The exact value to use for `cloud.resource_id` depends on the cloud
- // provider.
- // The following well-known definitions MUST be used if you set this
- // attribute and they apply:
- //
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias
- // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
- // with the resolved function version, as the same runtime instance may
- // be invokable with
- // multiple different aliases.
- // * **GCP:** The [URI of the
- // resource](https://cloud.google.com/iam/docs/full-resource-names)
- // * **Azure:** The [Fully Qualified Resource
- // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
- // of the invoked function,
- // *not* the function app, having the form
- // `/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider.
- CloudResourceIDKey = attribute.Key("cloud.resource_id")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // Red Hat OpenShift on Alibaba Cloud
- CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Red Hat OpenShift on AWS (ROSA)
- CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Azure Red Hat OpenShift
- CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
- // Google Bare Metal Solution (BMS)
- CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Red Hat OpenShift on Google Cloud
- CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
- // Red Hat OpenShift on IBM Cloud
- CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Heroku Platform as a Service
- CloudProviderHeroku = CloudProviderKey.String("heroku")
- // IBM Cloud
- CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
- return CloudAccountIDKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. It represents the cloud
-// regions often have multiple, isolated locations known as zones to increase
-// availability. Availability zone represents the zone where the resource is
-// running.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
- return CloudAvailabilityZoneKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the
-// "cloud.region" semantic conventions. It represents the geographical region
-// the resource is running.
-func CloudRegion(val string) attribute.KeyValue {
- return CloudRegionKey.String(val)
-}
-
-// CloudResourceID returns an attribute KeyValue conforming to the
-// "cloud.resource_id" semantic conventions. It represents the cloud
-// provider-specific native identifier of the monitored cloud resource (e.g. an
-// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-// on AWS, a [fully qualified resource
-// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
-// Azure, a [full resource
-// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
-// on GCP)
-func CloudResourceID(val string) attribute.KeyValue {
- return CloudResourceIDKey.String(val)
-}
-
-// A container instance.
-const (
- // ContainerCommandKey is the attribute Key conforming to the
- // "container.command" semantic conventions. It represents the command used
- // to run the container (i.e. the command name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol'
- // Note: If using embedded credentials or sensitive data, it is recommended
- // to remove them to prevent potential leakage.
- ContainerCommandKey = attribute.Key("container.command")
-
- // ContainerCommandArgsKey is the attribute Key conforming to the
- // "container.command_args" semantic conventions. It represents the all the
- // command arguments (including the command/executable itself) run by the
- // container. [2]
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol, --config, config.yaml'
- ContainerCommandArgsKey = attribute.Key("container.command_args")
-
- // ContainerCommandLineKey is the attribute Key conforming to the
- // "container.command_line" semantic conventions. It represents the full
- // command run by the container as a single string representing the full
- // command. [2]
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol --config config.yaml'
- ContainerCommandLineKey = attribute.Key("container.command_line")
-
- // ContainerIDKey is the attribute Key conforming to the "container.id"
- // semantic conventions. It represents the container ID. Usually a UUID, as
- // for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-identification).
- // The UUID might be abbreviated.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
-
- // ContainerImageIDKey is the attribute Key conforming to the
- // "container.image.id" semantic conventions. It represents the runtime
- // specific image identifier. Usually a hash algorithm followed by a UUID.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
- // Note: Docker defines a sha256 of the image id; `container.image.id`
- // corresponds to the `Image` field from the Docker container inspect
- // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
- // endpoint.
- // K8S defines a link to the container registry repository with digest
- // `"imageID": "registry.azurecr.io
- // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
- // The ID is assinged by the container runtime and can vary in different
- // environments. Consider using `oci.manifest.digest` if it is important to
- // identify the same image in different environments/runtimes.
- ContainerImageIDKey = attribute.Key("container.image.id")
-
- // ContainerImageNameKey is the attribute Key conforming to the
- // "container.image.name" semantic conventions. It represents the name of
- // the image the container was built on.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
-
- // ContainerImageRepoDigestsKey is the attribute Key conforming to the
- // "container.image.repo_digests" semantic conventions. It represents the
- // repo digests of the container image as provided by the container
- // runtime.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
- // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
- // Note:
- // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
- // and
- // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
- // report those under the `RepoDigests` field.
- ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
-
- // ContainerImageTagsKey is the attribute Key conforming to the
- // "container.image.tags" semantic conventions. It represents the container
- // image tags. An example can be found in [Docker Image
- // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
- // Should be only the `<tag>` section of the full name for example from
- // `registry.example.com/my-org/my-image:<tag>`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'v1.27.1', '3.5.7-0'
- ContainerImageTagsKey = attribute.Key("container.image.tags")
-
- // ContainerNameKey is the attribute Key conforming to the "container.name"
- // semantic conventions. It represents the container name used by container
- // runtime.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
-
- // ContainerRuntimeKey is the attribute Key conforming to the
- // "container.runtime" semantic conventions. It represents the container
- // runtime managing this container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
-)
-
-// ContainerCommand returns an attribute KeyValue conforming to the
-// "container.command" semantic conventions. It represents the command used to
-// run the container (i.e. the command name).
-func ContainerCommand(val string) attribute.KeyValue {
- return ContainerCommandKey.String(val)
-}
-
-// ContainerCommandArgs returns an attribute KeyValue conforming to the
-// "container.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) run by the
-// container. [2]
-func ContainerCommandArgs(val ...string) attribute.KeyValue {
- return ContainerCommandArgsKey.StringSlice(val)
-}
-
-// ContainerCommandLine returns an attribute KeyValue conforming to the
-// "container.command_line" semantic conventions. It represents the full
-// command run by the container as a single string representing the full
-// command. [2]
-func ContainerCommandLine(val string) attribute.KeyValue {
- return ContainerCommandLineKey.String(val)
-}
-
-// ContainerID returns an attribute KeyValue conforming to the
-// "container.id" semantic conventions. It represents the container ID. Usually
-// a UUID, as for example used to [identify Docker
-// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-// The UUID might be abbreviated.
-func ContainerID(val string) attribute.KeyValue {
- return ContainerIDKey.String(val)
-}
-
-// ContainerImageID returns an attribute KeyValue conforming to the
-// "container.image.id" semantic conventions. It represents the runtime
-// specific image identifier. Usually a hash algorithm followed by a UUID.
-func ContainerImageID(val string) attribute.KeyValue {
- return ContainerImageIDKey.String(val)
-}
-
-// ContainerImageName returns an attribute KeyValue conforming to the
-// "container.image.name" semantic conventions. It represents the name of the
-// image the container was built on.
-func ContainerImageName(val string) attribute.KeyValue {
- return ContainerImageNameKey.String(val)
-}
-
-// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
-// "container.image.repo_digests" semantic conventions. It represents the repo
-// digests of the container image as provided by the container runtime.
-func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
- return ContainerImageRepoDigestsKey.StringSlice(val)
-}
-
-// ContainerImageTags returns an attribute KeyValue conforming to the
-// "container.image.tags" semantic conventions. It represents the container
-// image tags. An example can be found in [Docker Image
-// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
-// Should be only the `<tag>` section of the full name for example from
-// `registry.example.com/my-org/my-image:<tag>`.
-func ContainerImageTags(val ...string) attribute.KeyValue {
- return ContainerImageTagsKey.StringSlice(val)
-}
-
-// ContainerName returns an attribute KeyValue conforming to the
-// "container.name" semantic conventions. It represents the container name used
-// by container runtime.
-func ContainerName(val string) attribute.KeyValue {
- return ContainerNameKey.String(val)
-}
-
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container
-// runtime managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
- return ContainerRuntimeKey.String(val)
-}
-
-// Describes device attributes.
-const (
- // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
- // conventions. It represents a unique identifier representing the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values
- // outlined below. This value is not an advertising identifier and MUST NOT
- // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
- // to the [vendor
- // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
- // On Android (Java or Kotlin), this value MUST be equal to the Firebase
- // Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on
- // best practices and exact implementation details. Caution should be taken
- // when storing personal data or anything which can identify a user. GDPR
- // and data protection laws may apply, ensure you do your own due
- // diligence.
- DeviceIDKey = attribute.Key("device.id")
-
- // DeviceManufacturerKey is the attribute Key conforming to the
- // "device.manufacturer" semantic conventions. It represents the name of
- // the device manufacturer
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-
- // DeviceModelIdentifierKey is the attribute Key conforming to the
- // "device.model.identifier" semantic conventions. It represents the model
- // identifier for the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine-readable version
- // of the model identifier rather than the market or consumer-friendly name
- // of the device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
-
- // DeviceModelNameKey is the attribute Key conforming to the
- // "device.model.name" semantic conventions. It represents the marketing
- // name for the device model
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human-readable version of
- // the device model rather than a machine-readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
-)
-
-// DeviceID returns an attribute KeyValue conforming to the "device.id"
-// semantic conventions. It represents a unique identifier representing the
-// device
-func DeviceID(val string) attribute.KeyValue {
- return DeviceIDKey.String(val)
-}
-
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
- return DeviceManufacturerKey.String(val)
-}
-
-// DeviceModelIdentifier returns an attribute KeyValue conforming to the
-// "device.model.identifier" semantic conventions. It represents the model
-// identifier for the device
-func DeviceModelIdentifier(val string) attribute.KeyValue {
- return DeviceModelIdentifierKey.String(val)
-}
-
-// DeviceModelName returns an attribute KeyValue conforming to the
-// "device.model.name" semantic conventions. It represents the marketing name
-// for the device model
-func DeviceModelName(val string) attribute.KeyValue {
- return DeviceModelNameKey.String(val)
-}
-
-// A host is defined as a computing instance. For example, physical servers,
-// virtual machines, switches or disk array.
-const (
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- HostArchKey = attribute.Key("host.arch")
-
- // HostCPUCacheL2SizeKey is the attribute Key conforming to the
- // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
- // of level 2 memory cache available to the processor (in Bytes).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12288000
- HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
-
- // HostCPUFamilyKey is the attribute Key conforming to the
- // "host.cpu.family" semantic conventions. It represents the family or
- // generation of the CPU.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', 'PA-RISC 1.1e'
- HostCPUFamilyKey = attribute.Key("host.cpu.family")
-
- // HostCPUModelIDKey is the attribute Key conforming to the
- // "host.cpu.model.id" semantic conventions. It represents the model
- // identifier. It provides more granular information about the CPU,
- // distinguishing it from other CPUs within the same family.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', '9000/778/B180L'
- HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
-
- // HostCPUModelNameKey is the attribute Key conforming to the
- // "host.cpu.model.name" semantic conventions. It represents the model
- // designation of the processor.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
- HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
-
- // HostCPUSteppingKey is the attribute Key conforming to the
- // "host.cpu.stepping" semantic conventions. It represents the stepping or
- // core revisions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1
- HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
-
- // HostCPUVendorIDKey is the attribute Key conforming to the
- // "host.cpu.vendor.id" semantic conventions. It represents the processor
- // manufacturer identifier. A maximum 12-character string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'GenuineIntel'
- // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
- // ID string in EBX, EDX and ECX registers. Writing these to memory in this
- // order results in a 12-character string.
- HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
-
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the vM image ID or host OS image ID.
- // For Cloud, this value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image or host OS as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-
- // HostIPKey is the attribute Key conforming to the "host.ip" semantic
- // conventions. It represents the available IP addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
- // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
- // addresses MUST be specified in the [RFC
- // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
- HostIPKey = attribute.Key("host.ip")
-
- // HostMacKey is the attribute Key conforming to the "host.mac" semantic
- // conventions. It represents the available MAC addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
- // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
- // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
- // as hyphen-separated octets in uppercase hexadecimal form from most to
- // least significant.
- HostMacKey = attribute.Key("host.mac")
-
- // HostNameKey is the attribute Key conforming to the "host.name" semantic
- // conventions. It represents the name of the host. On Unix systems, it may
- // contain what the hostname command returns, or the fully qualified
- // hostname, or another name specified by the user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
-
- // HostTypeKey is the attribute Key conforming to the "host.type" semantic
- // conventions. It represents the type of host. For Cloud, this must be the
- // machine type.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
-
-// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
-// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
-// level 2 memory cache available to the processor (in Bytes).
-func HostCPUCacheL2Size(val int) attribute.KeyValue {
- return HostCPUCacheL2SizeKey.Int(val)
-}
-
-// HostCPUFamily returns an attribute KeyValue conforming to the
-// "host.cpu.family" semantic conventions. It represents the family or
-// generation of the CPU.
-func HostCPUFamily(val string) attribute.KeyValue {
- return HostCPUFamilyKey.String(val)
-}
-
-// HostCPUModelID returns an attribute KeyValue conforming to the
-// "host.cpu.model.id" semantic conventions. It represents the model
-// identifier. It provides more granular information about the CPU,
-// distinguishing it from other CPUs within the same family.
-func HostCPUModelID(val string) attribute.KeyValue {
- return HostCPUModelIDKey.String(val)
-}
-
-// HostCPUModelName returns an attribute KeyValue conforming to the
-// "host.cpu.model.name" semantic conventions. It represents the model
-// designation of the processor.
-func HostCPUModelName(val string) attribute.KeyValue {
- return HostCPUModelNameKey.String(val)
-}
-
-// HostCPUStepping returns an attribute KeyValue conforming to the
-// "host.cpu.stepping" semantic conventions. It represents the stepping or core
-// revisions.
-func HostCPUStepping(val int) attribute.KeyValue {
- return HostCPUSteppingKey.Int(val)
-}
-
-// HostCPUVendorID returns an attribute KeyValue conforming to the
-// "host.cpu.vendor.id" semantic conventions. It represents the processor
-// manufacturer identifier. A maximum 12-character string.
-func HostCPUVendorID(val string) attribute.KeyValue {
- return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the vM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
- return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
- return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
-// Kubernetes resource attributes.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-
- // K8SClusterUIDKey is the attribute Key conforming to the
- // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
- // the cluster, set to the UID of the `kube-system` namespace.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
- // Note: K8S doesn't have support for obtaining a cluster ID. If this is
- // ever
- // added, we will recommend collecting the `k8s.cluster.uid` through the
- // official APIs. In the meantime, we are able to use the `uid` of the
- // `kube-system` namespace as a proxy for cluster ID. Read on for the
- // rationale.
- //
- // Every object created in a K8S cluster is assigned a distinct UID. The
- // `kube-system` namespace is used by Kubernetes itself and will exist
- // for the lifetime of the cluster. Using the `uid` of the `kube-system`
- // namespace is a reasonable proxy for the K8S ClusterID as it will only
- // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
- // UUIDs as standardized by
- // [ISO/IEC 9834-8 and ITU-T
- // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
- // Which states:
- //
- // > If generated according to one of the mechanisms defined in Rec.
- // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
- // different from all other UUIDs generated before 3603 A.D., or is
- // extremely likely to be different (depending on the mechanism chosen).
- //
- // Therefore, UIDs between clusters should be extremely unlikely to
- // conflict.
- K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
-
- // K8SContainerNameKey is the attribute Key conforming to the
- // "k8s.container.name" semantic conventions. It represents the name of the
- // Container from Pod specification, must be unique within a Pod. Container
- // runtime usually uses different globally unique name (`container.name`).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
-
- // K8SContainerRestartCountKey is the attribute Key conforming to the
- // "k8s.container.restart_count" semantic conventions. It represents the
- // number of times the container was restarted. This attribute can be used
- // to identify a particular container (running or stopped) within a
- // container spec.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 2
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-
- // K8SCronJobNameKey is the attribute Key conforming to the
- // "k8s.cronjob.name" semantic conventions. It represents the name of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-
- // K8SCronJobUIDKey is the attribute Key conforming to the
- // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
- // K8SDaemonSetNameKey is the attribute Key conforming to the
- // "k8s.daemonset.name" semantic conventions. It represents the name of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-
- // K8SDaemonSetUIDKey is the attribute Key conforming to the
- // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
- // K8SDeploymentNameKey is the attribute Key conforming to the
- // "k8s.deployment.name" semantic conventions. It represents the name of
- // the Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-
- // K8SDeploymentUIDKey is the attribute Key conforming to the
- // "k8s.deployment.uid" semantic conventions. It represents the UID of the
- // Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
- // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
- // semantic conventions. It represents the name of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-
- // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
- // semantic conventions. It represents the UID of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
- // K8SNamespaceNameKey is the attribute Key conforming to the
- // "k8s.namespace.name" semantic conventions. It represents the name of the
- // namespace that the pod is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-
- // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
- // semantic conventions. It represents the name of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
-
- // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
- // semantic conventions. It represents the UID of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-
- // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
- // semantic conventions. It represents the name of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-
- // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
- // semantic conventions. It represents the UID of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
- // K8SReplicaSetNameKey is the attribute Key conforming to the
- // "k8s.replicaset.name" semantic conventions. It represents the name of
- // the ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-
- // K8SReplicaSetUIDKey is the attribute Key conforming to the
- // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
- // ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
- // K8SStatefulSetNameKey is the attribute Key conforming to the
- // "k8s.statefulset.name" semantic conventions. It represents the name of
- // the StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-
- // K8SStatefulSetUIDKey is the attribute Key conforming to the
- // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
- // StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
- return K8SClusterNameKey.String(val)
-}
-
-// K8SClusterUID returns an attribute KeyValue conforming to the
-// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
-// cluster, set to the UID of the `kube-system` namespace.
-func K8SClusterUID(val string) attribute.KeyValue {
- return K8SClusterUIDKey.String(val)
-}
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from Pod specification, must be unique within a Pod. Container
-// runtime usually uses different globally unique name (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
- return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
- return K8SContainerRestartCountKey.Int(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
- return K8SCronJobNameKey.String(val)
-}
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
- return K8SCronJobUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
- return K8SDaemonSetNameKey.String(val)
-}
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
- return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
- return K8SDeploymentNameKey.String(val)
-}
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
- return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
- return K8SJobNameKey.String(val)
-}
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
- return K8SJobUIDKey.String(val)
-}
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
- return K8SNamespaceNameKey.String(val)
-}
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
- return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
- return K8SNodeUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
- return K8SPodNameKey.String(val)
-}
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
- return K8SPodUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
- return K8SReplicaSetNameKey.String(val)
-}
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
- return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
- return K8SStatefulSetNameKey.String(val)
-}
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
- return K8SStatefulSetUIDKey.String(val)
-}
-
-// An OCI image manifest.
-const (
- // OciManifestDigestKey is the attribute Key conforming to the
- // "oci.manifest.digest" semantic conventions. It represents the digest of
- // the OCI image manifest. For container images specifically is the digest
- // by which the container image is known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
- // Note: Follows [OCI Image Manifest
- // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
- // and specifically the [Digest
- // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
- // An example can be found in [Example Image
- // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
- OciManifestDigestKey = attribute.Key("oci.manifest.digest")
-)
-
-// OciManifestDigest returns an attribute KeyValue conforming to the
-// "oci.manifest.digest" semantic conventions. It represents the digest of the
-// OCI image manifest. For container images specifically is the digest by which
-// the container image is known.
-func OciManifestDigest(val string) attribute.KeyValue {
- return OciManifestDigestKey.String(val)
-}
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
- // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
- // semantic conventions. It represents the unique identifier for a
- // particular build or compilation of the operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
- OSBuildIDKey = attribute.Key("os.build_id")
-
- // OSDescriptionKey is the attribute Key conforming to the "os.description"
- // semantic conventions. It represents the human readable (not intended to
- // be parsed) OS version information, like e.g. reported by `ver` or
- // `lsb_release -a` commands.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
- // LTS'
- OSDescriptionKey = attribute.Key("os.description")
-
- // OSNameKey is the attribute Key conforming to the "os.name" semantic
- // conventions. It represents the human readable operating system name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
-
- // OSTypeKey is the attribute Key conforming to the "os.type" semantic
- // conventions. It represents the operating system type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- OSTypeKey = attribute.Key("os.type")
-
- // OSVersionKey is the attribute Key conforming to the "os.version"
- // semantic conventions. It represents the version string of the operating
- // system as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
-// semantic conventions. It represents the unique identifier for a particular
-// build or compilation of the operating system.
-func OSBuildID(val string) attribute.KeyValue {
- return OSBuildIDKey.String(val)
-}
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, like e.g. reported by `ver`
-// or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
- return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
- return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
- return OSVersionKey.String(val)
-}
-
-// An operating system process.
-const (
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
- // "process.command_args" semantic conventions. It represents the all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'cmd/otecol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
- // "process.command_line" semantic conventions. It represents the full
- // command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessExecutableNameKey is the attribute Key conforming to the
- // "process.executable.name" semantic conventions. It represents the name
- // of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
- // of `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
- // ProcessExecutablePathKey is the attribute Key conforming to the
- // "process.executable.path" semantic conventions. It represents the full
- // path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-
- // ProcessParentPIDKey is the attribute Key conforming to the
- // "process.parent_pid" semantic conventions. It represents the parent
- // Process identifier (PPID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 111
- ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
- // ProcessPIDKey is the attribute Key conforming to the "process.pid"
- // semantic conventions. It represents the process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-)
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PPID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
- return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
- return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
- return ProcessRuntimeVersionKey.String(val)
-}
-
-// The Android platform on which the Android application is running.
-const (
- // AndroidOSAPILevelKey is the attribute Key conforming to the
- // "android.os.api_level" semantic conventions. It represents the uniquely
- // identifies the framework API revision offered by a version
- // (`os.version`) of the android operating system. More information can be
- // found
- // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '33', '32'
- AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
-)
-
-// AndroidOSAPILevel returns an attribute KeyValue conforming to the
-// "android.os.api_level" semantic conventions. It represents the uniquely
-// identifies the framework API revision offered by a version (`os.version`) of
-// the android operating system. More information can be found
-// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
-func AndroidOSAPILevel(val string) attribute.KeyValue {
- return AndroidOSAPILevelKey.String(val)
-}
-
-// The web browser in which the application represented by the resource is
-// running. The `browser.*` attributes MUST be used only for resources that
-// represent applications running in a web browser (regardless of whether
-// running on a mobile or desktop device).
-const (
- // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
- // semantic conventions. It represents the array of brand name and version
- // separated by a space
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.brands`).
- BrowserBrandsKey = attribute.Key("browser.brands")
-
- // BrowserLanguageKey is the attribute Key conforming to the
- // "browser.language" semantic conventions. It represents the preferred
- // language of the user using the browser
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'en', 'en-US', 'fr', 'fr-FR'
- // Note: This value is intended to be taken from the Navigator API
- // `navigator.language`.
- BrowserLanguageKey = attribute.Key("browser.language")
-
- // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
- // semantic conventions. It represents a boolean that is true if the
- // browser is running on a mobile device
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.mobile`). If unavailable, this attribute
- // SHOULD be left unset.
- BrowserMobileKey = attribute.Key("browser.mobile")
-
- // BrowserPlatformKey is the attribute Key conforming to the
- // "browser.platform" semantic conventions. It represents the platform on
- // which the browser is running
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Windows', 'macOS', 'Android'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.platform`). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute
- // SHOULD be left unset in order for the values to be consistent.
- // The list of possible values is defined in the [W3C User-Agent Client
- // Hints
- // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
- // Note that some (but not all) of these values can overlap with values in
- // the [`os.type` and `os.name` attributes](./os.md). However, for
- // consistency, the values in the `browser.platform` attribute should
- // capture the exact value that the user agent provides.
- BrowserPlatformKey = attribute.Key("browser.platform")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
- return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
- return BrowserLanguageKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
- return BrowserMobileKey.Bool(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
- return BrowserPlatformKey.String(val)
-}
-
-// Resources used by AWS Elastic Container Service (ECS).
-const (
- // AWSECSClusterARNKey is the attribute Key conforming to the
- // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
- // [ECS
- // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
- // AWSECSContainerARNKey is the attribute Key conforming to the
- // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
- // Resource Name (ARN) of an [ECS container
- // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
- // AWSECSLaunchtypeKey is the attribute Key conforming to the
- // "aws.ecs.launchtype" semantic conventions. It represents the [launch
- // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
- // for an ECS task.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
- // AWSECSTaskARNKey is the attribute Key conforming to the
- // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
- // [ECS task
- // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
- // AWSECSTaskFamilyKey is the attribute Key conforming to the
- // "aws.ecs.task.family" semantic conventions. It represents the task
- // definition family this task definition is a member of.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
- // AWSECSTaskRevisionKey is the attribute Key conforming to the
- // "aws.ecs.task.revision" semantic conventions. It represents the revision
- // for this task definition.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
- return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
- return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
-// task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
-func AWSECSTaskARN(val string) attribute.KeyValue {
- return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the task
-// definition family this task definition is a member of.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
- return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// this task definition.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
- return AWSECSTaskRevisionKey.String(val)
-}
-
-// Resources used by AWS Elastic Kubernetes Service (EKS).
-const (
- // AWSEKSClusterARNKey is the attribute Key conforming to the
- // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
- // EKS cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
- return AWSEKSClusterARNKey.String(val)
-}
-
-// Resources specific to Amazon Web Services.
-const (
- // AWSLogGroupARNsKey is the attribute Key conforming to the
- // "aws.log.group.arns" semantic conventions. It represents the Amazon
- // Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
- // AWSLogGroupNamesKey is the attribute Key conforming to the
- // "aws.log.group.names" semantic conventions. It represents the name(s) of
- // the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like
- // multi-container applications, where a single application has sidecar
- // containers, and each write to their own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
- // AWSLogStreamARNsKey is the attribute Key conforming to the
- // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
- // the AWS log stream(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- // One log group can contain several log streams, so these ARNs necessarily
- // identify both a log group and a log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-
- // AWSLogStreamNamesKey is the attribute Key conforming to the
- // "aws.log.stream.names" semantic conventions. It represents the name(s)
- // of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-)
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
- return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
- return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
- return AWSLogStreamARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
- return AWSLogStreamNamesKey.StringSlice(val)
-}
-
-// Resource used by Google Cloud Run.
-const (
- // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
- // "gcp.cloud_run.job.execution" semantic conventions. It represents the
- // name of the Cloud Run
- // [execution](https://cloud.google.com/run/docs/managing/job-executions)
- // being run for the Job, as set by the
- // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
- // environment variable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'job-name-xxxx', 'sample-job-mdw84'
- GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
-
- // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
- // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
- // index for a task within an execution as provided by the
- // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
- // environment variable.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1
- GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
-)
-
-// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
-// of the Cloud Run
-// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
-// run for the Job, as set by the
-// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobExecution(val string) attribute.KeyValue {
- return GCPCloudRunJobExecutionKey.String(val)
-}
-
-// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
-// for a task within an execution as provided by the
-// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
- return GCPCloudRunJobTaskIndexKey.Int(val)
-}
-
-// Resources used by Google Compute Engine (GCE).
-const (
- // GCPGceInstanceHostnameKey is the attribute Key conforming to the
- // "gcp.gce.instance.hostname" semantic conventions. It represents the
- // hostname of a GCE instance. This is the full value of the default or
- // [custom
- // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-host1234.example.com',
- // 'sample-vm.us-west1-b.c.my-project.internal'
- GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
-
- // GCPGceInstanceNameKey is the attribute Key conforming to the
- // "gcp.gce.instance.name" semantic conventions. It represents the instance
- // name of a GCE instance. This is the value provided by `host.name`, the
- // visible name of the instance in the Cloud Console UI, and the prefix for
- // the default hostname of the instance as defined by the [default internal
- // DNS
- // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'instance-1', 'my-vm-name'
- GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
-)
-
-// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
-// of a GCE instance. This is the full value of the default or [custom
-// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
-func GCPGceInstanceHostname(val string) attribute.KeyValue {
- return GCPGceInstanceHostnameKey.String(val)
-}
-
-// GCPGceInstanceName returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.name" semantic conventions. It represents the instance
-// name of a GCE instance. This is the value provided by `host.name`, the
-// visible name of the instance in the Cloud Console UI, and the prefix for the
-// default hostname of the instance as defined by the [default internal DNS
-// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
-func GCPGceInstanceName(val string) attribute.KeyValue {
- return GCPGceInstanceNameKey.String(val)
-}
-
-// Heroku dyno metadata
-const (
- // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
- // semantic conventions. It represents the unique identifier for the
- // application
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
- HerokuAppIDKey = attribute.Key("heroku.app.id")
-
- // HerokuReleaseCommitKey is the attribute Key conforming to the
- // "heroku.release.commit" semantic conventions. It represents the commit
- // hash for the current release
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
- HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
- // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
- // "heroku.release.creation_timestamp" semantic conventions. It represents
- // the time and date the release was created
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2022-10-23T18:00:42Z'
- HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-)
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
- return HerokuAppIDKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
- return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
- return HerokuReleaseCreationTimestampKey.String(val)
-}
-
-// The software deployment.
-const (
- // DeploymentEnvironmentKey is the attribute Key conforming to the
- // "deployment.environment" semantic conventions. It represents the name of
- // the [deployment
- // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'staging', 'production'
- // Note: `deployment.environment` does not affect the uniqueness
- // constraints defined through
- // the `service.namespace`, `service.name` and `service.instance.id`
- // resource attributes.
- // This implies that resources carrying the following attribute
- // combinations MUST be
- // considered to be identifying the same service:
- //
- // * `service.name=frontend`, `deployment.environment=production`
- // * `service.name=frontend`, `deployment.environment=staging`.
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
-// (aka deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
- return DeploymentEnvironmentKey.String(val)
-}
-
-// A serverless instance.
-const (
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string, that will be potentially reused for other invocations to the
- // same function/function version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function converted to Bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 134217728
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run (Services):** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
-)
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string, that will be potentially reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
-
-// A service instance.
-const (
- // ServiceNameKey is the attribute Key conforming to the "service.name"
- // semantic conventions. It represents the logical name of the service.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled
- // services. If the value was not specified, SDKs MUST fallback to
- // `unknown_service:` concatenated with
- // [`process.executable.name`](process.md#process), e.g.
- // `unknown_service:bash`. If `process.executable.name` is not available,
- // the value MUST be set to `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
-
- // ServiceVersionKey is the attribute Key conforming to the
- // "service.version" semantic conventions. It represents the version string
- // of the service API or implementation. The format is not defined by these
- // conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2.0.0', 'a01dbef8a'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
- return ServiceNameKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation. The format is not defined by these
-// conventions.
-func ServiceVersion(val string) attribute.KeyValue {
- return ServiceVersionKey.String(val)
-}
-
-// A service instance.
-const (
- // ServiceInstanceIDKey is the attribute Key conforming to the
- // "service.instance.id" semantic conventions. It represents the string ID
- // of the service instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-k8s-pod-deployment-1',
- // '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words
- // `service.namespace,service.name,service.instance.id` triplet MUST be
- // globally unique). The ID helps to distinguish instances of the same
- // service that exist at the same time (e.g. instances of a horizontally
- // scaled service). It is preferable for the ID to be persistent and stay
- // the same for the lifetime of the service instance, however it is
- // acceptable that the ID is ephemeral and changes during important
- // lifetime events for the service (e.g. service restarts). If the service
- // has no inherent unique ID that can be used as the value of this
- // attribute it is recommended to generate a random Version 1 or Version 4
- // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
- // Version 5, see RFC 4122 for more recommendations).
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
- // ServiceNamespaceKey is the attribute Key conforming to the
- // "service.namespace" semantic conventions. It represents a namespace for
- // `service.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group
- // of services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name`
- // is expected to be unique for all services that have no explicit
- // namespace defined (so the empty/unspecified namespace is simply one more
- // valid namespace). Zero-length namespace string is assumed equal to
- // unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
-)
-
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
- return ServiceInstanceIDKey.String(val)
-}
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
- return ServiceNamespaceKey.String(val)
-}
-
-// The telemetry SDK used to capture data recorded by the instrumentation
-// libraries.
-const (
- // TelemetrySDKLanguageKey is the attribute Key conforming to the
- // "telemetry.sdk.language" semantic conventions. It represents the
- // language of the telemetry SDK.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: experimental
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
- // TelemetrySDKNameKey is the attribute Key conforming to the
- // "telemetry.sdk.name" semantic conventions. It represents the name of the
- // telemetry SDK as defined above.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'opentelemetry'
- // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
- // to `opentelemetry`.
- // If another SDK, like a fork or a vendor-provided implementation, is
- // used, this SDK MUST set the
- // `telemetry.sdk.name` attribute to the fully-qualified class or module
- // name of this SDK's main entry point
- // or another suitable identifier depending on the language.
- // The identifier `opentelemetry` is reserved and MUST NOT be used in this
- // case.
- // All custom identifiers SHOULD be stable across different versions of an
- // implementation.
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
- // TelemetrySDKVersionKey is the attribute Key conforming to the
- // "telemetry.sdk.version" semantic conventions. It represents the version
- // string of the telemetry SDK.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // rust
- TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
- return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
- return TelemetrySDKVersionKey.String(val)
-}
-
-// The telemetry SDK used to capture data recorded by the instrumentation
-// libraries.
-const (
- // TelemetryDistroNameKey is the attribute Key conforming to the
- // "telemetry.distro.name" semantic conventions. It represents the name of
- // the auto instrumentation agent or distribution, if used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'parts-unlimited-java'
- // Note: Official auto instrumentation agents and distributions SHOULD set
- // the `telemetry.distro.name` attribute to
- // a string starting with `opentelemetry-`, e.g.
- // `opentelemetry-java-instrumentation`.
- TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
-
- // TelemetryDistroVersionKey is the attribute Key conforming to the
- // "telemetry.distro.version" semantic conventions. It represents the
- // version string of the auto instrumentation agent or distribution, if
- // used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2.3'
- TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
-)
-
-// TelemetryDistroName returns an attribute KeyValue conforming to the
-// "telemetry.distro.name" semantic conventions. It represents the name of the
-// auto instrumentation agent or distribution, if used.
-func TelemetryDistroName(val string) attribute.KeyValue {
- return TelemetryDistroNameKey.String(val)
-}
-
-// TelemetryDistroVersion returns an attribute KeyValue conforming to the
-// "telemetry.distro.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent or distribution, if used.
-func TelemetryDistroVersion(val string) attribute.KeyValue {
- return TelemetryDistroVersionKey.String(val)
-}
-
-// Resource describing the packaged software running the application code. Web
-// engines are typically executed using process.runtime.
-const (
- // WebEngineDescriptionKey is the attribute Key conforming to the
- // "webengine.description" semantic conventions. It represents the
- // additional description of the web engine (e.g. detailed version and
- // edition information).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-
- // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
- // semantic conventions. It represents the name of the web engine.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
-
- // WebEngineVersionKey is the attribute Key conforming to the
- // "webengine.version" semantic conventions. It represents the version of
- // the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
-)
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OTelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OTelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.0.0'
- OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
- return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
- return OTelScopeVersionKey.String(val)
-}
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry
-// Scope's concepts.
-const (
- // OTelLibraryNameKey is the attribute Key conforming to the
- // "otel.library.name" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'io.opentelemetry.contrib.mongodb'
- // Deprecated: use the `otel.scope.name` attribute.
- OTelLibraryNameKey = attribute.Key("otel.library.name")
-
- // OTelLibraryVersionKey is the attribute Key conforming to the
- // "otel.library.version" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '1.0.0'
- // Deprecated: use the `otel.scope.version` attribute.
- OTelLibraryVersionKey = attribute.Key("otel.library.version")
-)
-
-// OTelLibraryName returns an attribute KeyValue conforming to the
-// "otel.library.name" semantic conventions.
-//
-// Deprecated: use the `otel.scope.name` attribute.
-func OTelLibraryName(val string) attribute.KeyValue {
- return OTelLibraryNameKey.String(val)
-}
-
-// OTelLibraryVersion returns an attribute KeyValue conforming to the
-// "otel.library.version" semantic conventions.
-//
-// Deprecated: use the `otel.scope.version` attribute.
-func OTelLibraryVersion(val string) attribute.KeyValue {
- return OTelLibraryVersionKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
deleted file mode 100644
index fe80b1731..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
-const SchemaURL = "https://opentelemetry.io/schemas/1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
deleted file mode 100644
index c1718234e..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
+++ /dev/null
@@ -1,1323 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Operations that access some remote service.
-const (
- // PeerServiceKey is the attribute Key conforming to the "peer.service"
- // semantic conventions. It represents the
- // [`service.name`](/docs/resource/README.md#service) of the remote
- // service. SHOULD be equal to the actual `service.name` resource attribute
- // of the remote service if any.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](/docs/resource/README.md#service) of the remote service.
-// SHOULD be equal to the actual `service.name` resource attribute of the
-// remote service if any.
-func PeerService(val string) attribute.KeyValue {
- return PeerServiceKey.String(val)
-}
-
-// These attributes may be used for any operation with an authenticated and/or
-// authorized enduser.
-const (
- // EnduserIDKey is the attribute Key conforming to the "enduser.id"
- // semantic conventions. It represents the username or client_id extracted
- // from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
- // in the inbound request from outside the system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
-
- // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
- // semantic conventions. It represents the actual/assumed role the client
- // is making the request under extracted from token or application security
- // context.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
-
- // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
- // semantic conventions. It represents the scopes or granted authorities
- // the client currently possesses extracted from token or application
- // security context. The value would come from the scope associated with an
- // [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
- // value in a [SAML 2.0
- // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
- return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under extracted from token or application
-// security context.
-func EnduserRole(val string) attribute.KeyValue {
- return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses extracted from token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
-// These attributes allow to report this unit of code and therefore to provide
-// more context about the span.
-const (
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 16
- CodeColumnKey = attribute.Key("code.column")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeStacktraceKey is the attribute Key conforming to the
- // "code.stacktrace" semantic conventions. It represents a stacktrace as a
- // string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'at
- // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- CodeStacktraceKey = attribute.Key("code.stacktrace")
-)
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
- return CodeColumnKey.Int(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
- return CodeFilepathKey.String(val)
-}
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
- return CodeFunctionKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
- return CodeLineNumberKey.Int(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
- return CodeNamespaceKey.String(val)
-}
-
-// CodeStacktrace returns an attribute KeyValue conforming to the
-// "code.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func CodeStacktrace(val string) attribute.KeyValue {
- return CodeStacktraceKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// Span attributes used by AWS Lambda (in addition to general `faas`
-// attributes).
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the
- // `/runtime/invocation/next` applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `cloud.resource_id` if an alias is
- // involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for CloudEvents. CloudEvents is a specification on how to define
-// event data in a standard way. These attributes can be attached to spans when
-// performing operations with CloudEvents, regardless of the protocol being
-// used.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the
- // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
- // uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the
- // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
- // identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the
- // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
- // contains a value describing the type of event related to the originating
- // occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-// uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
-// identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
-// contains a value describing the type of event related to the originating
-// occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
-
-// Semantic conventions for the OpenTracing Shim
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span doesn't depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
-// concepts.
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
-
-// This semantic convention describes an instance of a function that runs
-// without provisioning or managing of servers (also known as serverless
-// functions or Function as a Service (FaaS)) with spans.
-const (
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-)
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// Semantic Convention for FaaS triggered as a response to some data source
-// operation such as a database or filesystem read/write.
-const (
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It represents the
- // describes the type of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: experimental
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
-// database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// Semantic Convention for FaaS scheduled to be executed regularly.
-const (
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-)
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// Contains additional attributes for incoming FaaS spans.
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
- return FaaSColdstartKey.Bool(val)
-}
-
-// The `aws` conventions apply to operations using the AWS SDK. They map
-// request or response parameters in AWS SDK API calls to attributes on a Span.
-// The conventions have been collected over time based on feedback from AWS
-// users of tracing and will continue to evolve as new interesting conventions
-// are found.
-// Some descriptions are also provided for populating general OpenTelemetry
-// semantic conventions based on these APIs.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
- AWSRequestIDKey = attribute.Key("aws.request_id")
-)
-
-// AWSRequestID returns an attribute KeyValue conforming to the
-// "aws.request_id" semantic conventions. It represents the AWS request ID as
-// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
-func AWSRequestID(val string) attribute.KeyValue {
- return AWSRequestIDKey.String(val)
-}
-
-// Attributes that exist for multiple DynamoDB request types.
-const (
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the
- // value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
- // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number }, "TableName": "string",
- // "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value
- // of the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
- // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
- // represents the JSON-serialized value of the `ItemCollectionMetrics`
- // response field.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
- // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
- // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
- // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
- // "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of
- // the `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value
- // of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
- // RelatedItems, ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
- // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
- // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
- // request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
- // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
- // It represents the value of the
- // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of
- // the `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys
- // in the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-)
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
-
-// DynamoDB.CreateTable
-const (
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// DynamoDB.ListTables
-const (
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the the
-// number of items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// DynamoDB.Query
-const (
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the
- // value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-)
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// DynamoDB.Scan
-const (
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of
- // the `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the
- // value of the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of
- // the `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the
- // value of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-)
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
-// DynamoDB.UpdateTable
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in the
- // the `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// Attributes that exist for S3 request types.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3CopySourceKey is the attribute Key conforming to the
- // "aws.s3.copy_source" semantic conventions. It represents the source
- // object (in the form `bucket`/`key`) for the copy operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `copy_source` attribute applies to S3 copy operations and
- // corresponds to the `--copy-source` parameter
- // of the [copy-object operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
- // Note: The `delete` attribute is only applicable to the
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e. that require the object key as a mandatory parameter.
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // -
- // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
- // -
- // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
- // -
- // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
- // -
- // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
- // -
- // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3KeyKey = attribute.Key("aws.s3.key")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number
- // of the part being uploaded in a multipart-upload operation. This is a
- // positive integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // and
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- // operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter
- // of the
- // [upload-part operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-
- // AWSS3UploadIDKey is the attribute Key conforming to the
- // "aws.s3.upload_id" semantic conventions. It represents the upload ID
- // that identifies the multipart upload.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
- // Note: The `upload_id` attribute applies to S3 multipart-upload
- // operations and corresponds to the `--upload-id` parameter
- // of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // multipart operations.
- // This applies in particular to the following operations:
- //
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
- return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the
-// "aws.s3.delete" semantic conventions. It represents the delete request
-// container that specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
- return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
- return AWSS3PartNumberKey.Int(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
- return AWSS3UploadIDKey.String(val)
-}
-
-// Semantic conventions to apply when instrumenting the GraphQL implementation.
-// They map GraphQL operations to attributes on a Span.
-const (
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
deleted file mode 100644
index 2de1fc3c6..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.26.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
deleted file mode 100644
index d8dc822b2..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
+++ /dev/null
@@ -1,8996 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The Android platform on which the Android application is running.
-const (
- // AndroidOSAPILevelKey is the attribute Key conforming to the
- // "android.os.api_level" semantic conventions. It represents the uniquely
- // identifies the framework API revision offered by a version
- // (`os.version`) of the android operating system. More information can be
- // found
- // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '33', '32'
- AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
-)
-
-// AndroidOSAPILevel returns an attribute KeyValue conforming to the
-// "android.os.api_level" semantic conventions. It represents the uniquely
-// identifies the framework API revision offered by a version (`os.version`) of
-// the android operating system. More information can be found
-// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
-func AndroidOSAPILevel(val string) attribute.KeyValue {
- return AndroidOSAPILevelKey.String(val)
-}
-
-// ASP.NET Core attributes
-const (
- // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.result" semantic conventions. It represents
- // the rate-limiting result, shows whether the lease was acquired or
- // contains a rejection reason
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'acquired', 'request_canceled'
- AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
-
- // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
- // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
- // represents the full type name of the
- // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
- // implementation that handled the exception.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if and only if the exception
- // was handled by this handler.)
- // Stability: stable
- // Examples: 'Contoso.MyHandler'
- AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
-
- // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
- // to the "aspnetcore.diagnostics.exception.result" semantic conventions.
- // It represents the aSP.NET Core exception middleware handling result
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'handled', 'unhandled'
- AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
-
- // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
- // the rate limiting policy name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'fixed', 'sliding', 'token'
- AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
-
- // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
- // "aspnetcore.request.is_unhandled" semantic conventions. It represents
- // the flag indicating if request was handled by the application pipeline.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: True
- AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
-
- // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
- // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
- // value that indicates whether the matched route is a fallback route.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: True
- AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
-
- // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the
- // "aspnetcore.routing.match_status" semantic conventions. It represents
- // the match result - success or failure
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'success', 'failure'
- AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status")
-)
-
-var (
- // Lease was acquired
- AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
- // Lease request was rejected by the endpoint limiter
- AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
- // Lease request was rejected by the global limiter
- AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
- // Lease request was canceled
- AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
-)
-
-var (
- // Exception was handled by the exception handling middleware
- AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled")
- // Exception was not handled by the exception handling middleware
- AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled")
- // Exception handling was skipped because the response had started
- AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped")
- // Exception handling didn't run because the request was aborted
- AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted")
-)
-
-var (
- // Match succeeded
- AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success")
- // Match failed
- AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure")
-)
-
-// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
-// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
-// represents the full type name of the
-// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
-// implementation that handled the exception.
-func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
- return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
-}
-
-// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
-// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
-// the rate limiting policy name.
-func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
- return AspnetcoreRateLimitingPolicyKey.String(val)
-}
-
-// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
-// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
-// the flag indicating if request was handled by the application pipeline.
-func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
- return AspnetcoreRequestIsUnhandledKey.Bool(val)
-}
-
-// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
-// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
-// value that indicates whether the matched route is a fallback route.
-func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
- return AspnetcoreRoutingIsFallbackKey.Bool(val)
-}
-
-// Generic attributes for AWS services.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
- AWSRequestIDKey = attribute.Key("aws.request_id")
-)
-
-// AWSRequestID returns an attribute KeyValue conforming to the
-// "aws.request_id" semantic conventions. It represents the AWS request ID as
-// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
-func AWSRequestID(val string) attribute.KeyValue {
- return AWSRequestIDKey.String(val)
-}
-
-// Attributes for AWS DynamoDB.
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the
- // value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
- // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number }, "TableName": "string",
- // "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of
- // the `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in the
- // `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value
- // of the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
- // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
- // represents the JSON-serialized value of the `ItemCollectionMetrics`
- // response field.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
- // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
- // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
- // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
- // "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of
- // the `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value
- // of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
- // RelatedItems, ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
- // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
- // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
- // request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
- // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
- // It represents the value of the
- // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the
- // value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the
- // value of the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of
- // the `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of
- // the `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys
- // in the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the
- // value of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the number of
-// items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
-// Attributes for AWS Elastic Container Service (ECS).
-const (
- // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
- // semantic conventions. It represents the ID of a running ECS task. The ID
- // MUST be extracted from `task.arn`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is
- // populated.)
- // Stability: experimental
- // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b',
- // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
- AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
-
- // AWSECSClusterARNKey is the attribute Key conforming to the
- // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
- // [ECS
- // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
- // AWSECSContainerARNKey is the attribute Key conforming to the
- // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
- // Resource Name (ARN) of an [ECS container
- // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
- // AWSECSLaunchtypeKey is the attribute Key conforming to the
- // "aws.ecs.launchtype" semantic conventions. It represents the [launch
- // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
- // for an ECS task.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
- // AWSECSTaskARNKey is the attribute Key conforming to the
- // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a
- // running [ECS
- // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b',
- // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
- // AWSECSTaskFamilyKey is the attribute Key conforming to the
- // "aws.ecs.task.family" semantic conventions. It represents the family
- // name of the [ECS task
- // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
- // used to create the ECS task.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
- // AWSECSTaskRevisionKey is the attribute Key conforming to the
- // "aws.ecs.task.revision" semantic conventions. It represents the revision
- // for the task definition used to create the ECS task.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSTaskID returns an attribute KeyValue conforming to the
-// "aws.ecs.task.id" semantic conventions. It represents the ID of a running
-// ECS task. The ID MUST be extracted from `task.arn`.
-func AWSECSTaskID(val string) attribute.KeyValue {
- return AWSECSTaskIDKey.String(val)
-}
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
- return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
- return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
-// [ECS
-// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
-func AWSECSTaskARN(val string) attribute.KeyValue {
- return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the family name of
-// the [ECS task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
-// used to create the ECS task.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
- return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// the task definition used to create the ECS task.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
- return AWSECSTaskRevisionKey.String(val)
-}
-
-// Attributes for AWS Elastic Kubernetes Service (EKS).
-const (
- // AWSEKSClusterARNKey is the attribute Key conforming to the
- // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
- // EKS cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
- return AWSEKSClusterARNKey.String(val)
-}
-
-// Attributes for AWS Logs.
-const (
- // AWSLogGroupARNsKey is the attribute Key conforming to the
- // "aws.log.group.arns" semantic conventions. It represents the Amazon
- // Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
- // AWSLogGroupNamesKey is the attribute Key conforming to the
- // "aws.log.group.names" semantic conventions. It represents the name(s) of
- // the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like
- // multi-container applications, where a single application has sidecar
- // containers, and each write to their own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
- // AWSLogStreamARNsKey is the attribute Key conforming to the
- // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
- // the AWS log stream(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- // One log group can contain several log streams, so these ARNs necessarily
- // identify both a log group and a log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-
- // AWSLogStreamNamesKey is the attribute Key conforming to the
- // "aws.log.stream.names" semantic conventions. It represents the name(s)
- // of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-)
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
- return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
- return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
- return AWSLogStreamARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
- return AWSLogStreamNamesKey.StringSlice(val)
-}
-
-// Attributes for AWS Lambda.
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the
- // `/runtime/invocation/next` applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `cloud.resource_id` if an alias is
- // involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for AWS S3.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3CopySourceKey is the attribute Key conforming to the
- // "aws.s3.copy_source" semantic conventions. It represents the source
- // object (in the form `bucket`/`key`) for the copy operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `copy_source` attribute applies to S3 copy operations and
- // corresponds to the `--copy-source` parameter
- // of the [copy-object operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
- // Note: The `delete` attribute is only applicable to the
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e. that require the object key as a mandatory parameter.
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // -
- // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
- // -
- // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
- // -
- // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
- // -
- // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
- // -
- // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3KeyKey = attribute.Key("aws.s3.key")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number
- // of the part being uploaded in a multipart-upload operation. This is a
- // positive integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // and
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- // operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter
- // of the
- // [upload-part operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-
- // AWSS3UploadIDKey is the attribute Key conforming to the
- // "aws.s3.upload_id" semantic conventions. It represents the upload ID
- // that identifies the multipart upload.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
- // Note: The `upload_id` attribute applies to S3 multipart-upload
- // operations and corresponds to the `--upload-id` parameter
- // of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // multipart operations.
- // This applies in particular to the following operations:
- //
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
- return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the
-// "aws.s3.delete" semantic conventions. It represents the delete request
-// container that specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
- return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
- return AWSS3PartNumberKey.Int(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
- return AWSS3UploadIDKey.String(val)
-}
-
-// The web browser attributes
-const (
- // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
- // semantic conventions. It represents the array of brand name and version
- // separated by a space
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.brands`).
- BrowserBrandsKey = attribute.Key("browser.brands")
-
- // BrowserLanguageKey is the attribute Key conforming to the
- // "browser.language" semantic conventions. It represents the preferred
- // language of the user using the browser
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'en', 'en-US', 'fr', 'fr-FR'
- // Note: This value is intended to be taken from the Navigator API
- // `navigator.language`.
- BrowserLanguageKey = attribute.Key("browser.language")
-
- // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
- // semantic conventions. It represents a boolean that is true if the
- // browser is running on a mobile device
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.mobile`). If unavailable, this attribute
- // SHOULD be left unset.
- BrowserMobileKey = attribute.Key("browser.mobile")
-
- // BrowserPlatformKey is the attribute Key conforming to the
- // "browser.platform" semantic conventions. It represents the platform on
- // which the browser is running
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Windows', 'macOS', 'Android'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.platform`). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute
- // SHOULD be left unset in order for the values to be consistent.
- // The list of possible values is defined in the [W3C User-Agent Client
- // Hints
- // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
- // Note that some (but not all) of these values can overlap with values in
- // the [`os.type` and `os.name` attributes](./os.md). However, for
- // consistency, the values in the `browser.platform` attribute should
- // capture the exact value that the user agent provides.
- BrowserPlatformKey = attribute.Key("browser.platform")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
- return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
- return BrowserLanguageKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
- return BrowserMobileKey.Bool(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
- return BrowserPlatformKey.String(val)
-}
-
-// These attributes may be used to describe the client in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ClientAddressKey is the attribute Key conforming to the "client.address"
- // semantic conventions. It represents the client address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the server side, and when communicating through
- // an intermediary, `client.address` SHOULD represent the client address
- // behind any intermediaries, for example proxies, if it's available.
- ClientAddressKey = attribute.Key("client.address")
-
- // ClientPortKey is the attribute Key conforming to the "client.port"
- // semantic conventions. It represents the client port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- // Note: When observed from the server side, and when communicating through
- // an intermediary, `client.port` SHOULD represent the client port behind
- // any intermediaries, for example proxies, if it's available.
- ClientPortKey = attribute.Key("client.port")
-)
-
-// ClientAddress returns an attribute KeyValue conforming to the
-// "client.address" semantic conventions. It represents the client address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func ClientAddress(val string) attribute.KeyValue {
- return ClientAddressKey.String(val)
-}
-
-// ClientPort returns an attribute KeyValue conforming to the "client.port"
-// semantic conventions. It represents the client port number.
-func ClientPort(val int) attribute.KeyValue {
- return ClientPortKey.Int(val)
-}
-
-// A cloud environment (e.g. GCP, Azure, AWS).
-const (
- // CloudAccountIDKey is the attribute Key conforming to the
- // "cloud.account.id" semantic conventions. It represents the cloud account
- // ID the resource is assigned to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
-
- // CloudAvailabilityZoneKey is the attribute Key conforming to the
- // "cloud.availability_zone" semantic conventions. It represents the cloud
- // regions often have multiple, isolated locations known as zones to
- // increase availability. Availability zone represents the zone where the
- // resource is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google
- // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
- // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
- // semantic conventions. It represents the cloud platform in use.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-
- // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
- // semantic conventions. It represents the name of the cloud provider.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- CloudProviderKey = attribute.Key("cloud.provider")
-
- // CloudRegionKey is the attribute Key conforming to the "cloud.region"
- // semantic conventions. It represents the geographical region the resource
- // is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for
- // example [Alibaba Cloud
- // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
- // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
- // [Azure
- // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
- // [Google Cloud regions](https://cloud.google.com/about/locations), or
- // [Tencent Cloud
- // regions](https://www.tencentcloud.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
-
- // CloudResourceIDKey is the attribute Key conforming to the
- // "cloud.resource_id" semantic conventions. It represents the cloud
- // provider-specific native identifier of the monitored cloud resource
- // (e.g. an
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // on AWS, a [fully qualified resource
- // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
- // on Azure, a [full resource
- // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
- // on GCP)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
- // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
- // '/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
- // Note: On some cloud providers, it may not be possible to determine the
- // full ID at startup,
- // so it may be necessary to set `cloud.resource_id` as a span attribute
- // instead.
- //
- // The exact value to use for `cloud.resource_id` depends on the cloud
- // provider.
- // The following well-known definitions MUST be used if you set this
- // attribute and they apply:
- //
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias
- // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
- // with the resolved function version, as the same runtime instance may
- // be invokable with
- // multiple different aliases.
- // * **GCP:** The [URI of the
- // resource](https://cloud.google.com/iam/docs/full-resource-names)
- // * **Azure:** The [Fully Qualified Resource
- // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
- // of the invoked function,
- // *not* the function app, having the form
- // `/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider.
- CloudResourceIDKey = attribute.Key("cloud.resource_id")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // Red Hat OpenShift on Alibaba Cloud
- CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Red Hat OpenShift on AWS (ROSA)
- CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Apps
- CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Azure Red Hat OpenShift
- CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
- // Google Bare Metal Solution (BMS)
- CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Red Hat OpenShift on Google Cloud
- CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
- // Red Hat OpenShift on IBM Cloud
- CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Heroku Platform as a Service
- CloudProviderHeroku = CloudProviderKey.String("heroku")
- // IBM Cloud
- CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
- return CloudAccountIDKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. It represents the cloud
-// regions often have multiple, isolated locations known as zones to increase
-// availability. Availability zone represents the zone where the resource is
-// running.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
- return CloudAvailabilityZoneKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the
-// "cloud.region" semantic conventions. It represents the geographical region
-// the resource is running.
-func CloudRegion(val string) attribute.KeyValue {
- return CloudRegionKey.String(val)
-}
-
-// CloudResourceID returns an attribute KeyValue conforming to the
-// "cloud.resource_id" semantic conventions. It represents the cloud
-// provider-specific native identifier of the monitored cloud resource (e.g. an
-// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-// on AWS, a [fully qualified resource
-// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
-// Azure, a [full resource
-// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
-// on GCP)
-func CloudResourceID(val string) attribute.KeyValue {
- return CloudResourceIDKey.String(val)
-}
-
-// Attributes for CloudEvents.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the
- // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
- // uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the
- // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
- // identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the
- // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
- // contains a value describing the type of event related to the originating
- // occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-// uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
-// identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
-// contains a value describing the type of event related to the originating
-// occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
-
-// These attributes allow to report this unit of code and therefore to provide
-// more context about the span.
-const (
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 16
- CodeColumnKey = attribute.Key("code.column")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeStacktraceKey is the attribute Key conforming to the
- // "code.stacktrace" semantic conventions. It represents a stacktrace as a
- // string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'at
- // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- CodeStacktraceKey = attribute.Key("code.stacktrace")
-)
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
- return CodeColumnKey.Int(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
- return CodeFilepathKey.String(val)
-}
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
- return CodeFunctionKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
- return CodeLineNumberKey.Int(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
- return CodeNamespaceKey.String(val)
-}
-
-// CodeStacktrace returns an attribute KeyValue conforming to the
-// "code.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func CodeStacktrace(val string) attribute.KeyValue {
- return CodeStacktraceKey.String(val)
-}
-
-// A container instance.
-const (
- // ContainerCommandKey is the attribute Key conforming to the
- // "container.command" semantic conventions. It represents the command used
- // to run the container (i.e. the command name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol'
- // Note: If using embedded credentials or sensitive data, it is recommended
- // to remove them to prevent potential leakage.
- ContainerCommandKey = attribute.Key("container.command")
-
- // ContainerCommandArgsKey is the attribute Key conforming to the
- // "container.command_args" semantic conventions. It represents the all the
- // command arguments (including the command/executable itself) run by the
- // container. [2]
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol, --config, config.yaml'
- ContainerCommandArgsKey = attribute.Key("container.command_args")
-
- // ContainerCommandLineKey is the attribute Key conforming to the
- // "container.command_line" semantic conventions. It represents the full
- // command run by the container as a single string representing the full
- // command. [2]
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol --config config.yaml'
- ContainerCommandLineKey = attribute.Key("container.command_line")
-
- // ContainerCPUStateKey is the attribute Key conforming to the
- // "container.cpu.state" semantic conventions. It represents the CPU state
- // for this data point.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'user', 'kernel'
- ContainerCPUStateKey = attribute.Key("container.cpu.state")
-
- // ContainerIDKey is the attribute Key conforming to the "container.id"
- // semantic conventions. It represents the container ID. Usually a UUID, as
- // for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-identification).
- // The UUID might be abbreviated.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
-
- // ContainerImageIDKey is the attribute Key conforming to the
- // "container.image.id" semantic conventions. It represents the runtime
- // specific image identifier. Usually a hash algorithm followed by a UUID.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
- // Note: Docker defines a sha256 of the image id; `container.image.id`
- // corresponds to the `Image` field from the Docker container inspect
- // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
- // endpoint.
- // K8S defines a link to the container registry repository with digest
- // `"imageID": "registry.azurecr.io
- // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
- // The ID is assigned by the container runtime and can vary in different
- // environments. Consider using `oci.manifest.digest` if it is important to
- // identify the same image in different environments/runtimes.
- ContainerImageIDKey = attribute.Key("container.image.id")
-
- // ContainerImageNameKey is the attribute Key conforming to the
- // "container.image.name" semantic conventions. It represents the name of
- // the image the container was built on.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
-
- // ContainerImageRepoDigestsKey is the attribute Key conforming to the
- // "container.image.repo_digests" semantic conventions. It represents the
- // repo digests of the container image as provided by the container
- // runtime.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
- // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
- // Note:
- // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
- // and
- // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
- // report those under the `RepoDigests` field.
- ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
-
- // ContainerImageTagsKey is the attribute Key conforming to the
- // "container.image.tags" semantic conventions. It represents the container
- // image tags. An example can be found in [Docker Image
- // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
- // Should be only the `<tag>` section of the full name for example from
- // `registry.example.com/my-org/my-image:<tag>`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'v1.27.1', '3.5.7-0'
- ContainerImageTagsKey = attribute.Key("container.image.tags")
-
- // ContainerNameKey is the attribute Key conforming to the "container.name"
- // semantic conventions. It represents the container name used by container
- // runtime.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
-
- // ContainerRuntimeKey is the attribute Key conforming to the
- // "container.runtime" semantic conventions. It represents the container
- // runtime managing this container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
-)
-
-var (
- // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
- ContainerCPUStateUser = ContainerCPUStateKey.String("user")
- // When CPU is used by the system (host OS)
- ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
- // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
- ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
-)
-
-// ContainerCommand returns an attribute KeyValue conforming to the
-// "container.command" semantic conventions. It represents the command used to
-// run the container (i.e. the command name).
-func ContainerCommand(val string) attribute.KeyValue {
- return ContainerCommandKey.String(val)
-}
-
-// ContainerCommandArgs returns an attribute KeyValue conforming to the
-// "container.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) run by the
-// container. [2]
-func ContainerCommandArgs(val ...string) attribute.KeyValue {
- return ContainerCommandArgsKey.StringSlice(val)
-}
-
-// ContainerCommandLine returns an attribute KeyValue conforming to the
-// "container.command_line" semantic conventions. It represents the full
-// command run by the container as a single string representing the full
-// command. [2]
-func ContainerCommandLine(val string) attribute.KeyValue {
- return ContainerCommandLineKey.String(val)
-}
-
-// ContainerID returns an attribute KeyValue conforming to the
-// "container.id" semantic conventions. It represents the container ID. Usually
-// a UUID, as for example used to [identify Docker
-// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-// The UUID might be abbreviated.
-func ContainerID(val string) attribute.KeyValue {
- return ContainerIDKey.String(val)
-}
-
-// ContainerImageID returns an attribute KeyValue conforming to the
-// "container.image.id" semantic conventions. It represents the runtime
-// specific image identifier. Usually a hash algorithm followed by a UUID.
-func ContainerImageID(val string) attribute.KeyValue {
- return ContainerImageIDKey.String(val)
-}
-
-// ContainerImageName returns an attribute KeyValue conforming to the
-// "container.image.name" semantic conventions. It represents the name of the
-// image the container was built on.
-func ContainerImageName(val string) attribute.KeyValue {
- return ContainerImageNameKey.String(val)
-}
-
-// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
-// "container.image.repo_digests" semantic conventions. It represents the repo
-// digests of the container image as provided by the container runtime.
-func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
- return ContainerImageRepoDigestsKey.StringSlice(val)
-}
-
-// ContainerImageTags returns an attribute KeyValue conforming to the
-// "container.image.tags" semantic conventions. It represents the container
-// image tags. An example can be found in [Docker Image
-// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
-// Should be only the `<tag>` section of the full name for example from
-// `registry.example.com/my-org/my-image:<tag>`.
-func ContainerImageTags(val ...string) attribute.KeyValue {
- return ContainerImageTagsKey.StringSlice(val)
-}
-
-// ContainerName returns an attribute KeyValue conforming to the
-// "container.name" semantic conventions. It represents the container name used
-// by container runtime.
-func ContainerName(val string) attribute.KeyValue {
- return ContainerNameKey.String(val)
-}
-
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container
-// runtime managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
- return ContainerRuntimeKey.String(val)
-}
-
-// This group defines the attributes used to describe telemetry in the context
-// of databases.
-const (
- // DBClientConnectionsPoolNameKey is the attribute Key conforming to the
- // "db.client.connections.pool.name" semantic conventions. It represents
- // the name of the connection pool; unique within the instrumented
- // application. In case the connection pool implementation doesn't provide
- // a name, instrumentation should use a combination of `server.address` and
- // `server.port` attributes formatted as `server.address:server.port`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myDataSource'
- DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
-
- // DBClientConnectionsStateKey is the attribute Key conforming to the
- // "db.client.connections.state" semantic conventions. It represents the
- // state of a connection in the pool
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'idle'
- DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
-
- // DBCollectionNameKey is the attribute Key conforming to the
- // "db.collection.name" semantic conventions. It represents the name of a
- // collection (table, container) within the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'public.users', 'customers'
- // Note: If the collection name is parsed from the query, it SHOULD match
- // the value provided in the query and may be qualified with the schema and
- // database name.
- // It is RECOMMENDED to capture the value as provided by the application
- // without attempting to do any case normalization.
- DBCollectionNameKey = attribute.Key("db.collection.name")
-
- // DBNamespaceKey is the attribute Key conforming to the "db.namespace"
- // semantic conventions. It represents the name of the database, fully
- // qualified within the server address and port.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'customers', 'test.users'
- // Note: If a database system has multiple namespace components, they
- // SHOULD be concatenated (potentially using database system specific
- // conventions) from most general to most specific namespace component, and
- // more specific namespaces SHOULD NOT be captured without the more general
- // namespaces, to ensure that "startswith" queries for the more general
- // namespaces will be valid.
- // Semantic conventions for individual database systems SHOULD document
- // what `db.namespace` means in the context of that system.
- // It is RECOMMENDED to capture the value as provided by the application
- // without attempting to do any case normalization.
- DBNamespaceKey = attribute.Key("db.namespace")
-
- // DBOperationNameKey is the attribute Key conforming to the
- // "db.operation.name" semantic conventions. It represents the name of the
- // operation or command being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: It is RECOMMENDED to capture the value as provided by the
- // application without attempting to do any case normalization.
- DBOperationNameKey = attribute.Key("db.operation.name")
-
- // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
- // semantic conventions. It represents the database query being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey
- // "WuValue"'
- DBQueryTextKey = attribute.Key("db.query.text")
-
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents the database management system (DBMS) product
- // as identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The actual DBMS may differ from the one identified by the client.
- // For example, when using PostgreSQL client libraries to connect to a
- // CockroachDB, the `db.system` is set to `postgresql` based on the
- // instrumentation's best knowledge.
- DBSystemKey = attribute.Key("db.system")
-)
-
-var (
- // idle
- DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle")
- // used
- DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // Microsoft SQL Server Compact
- DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
- // OpenSearch
- DBSystemOpensearch = DBSystemKey.String("opensearch")
- // ClickHouse
- DBSystemClickhouse = DBSystemKey.String("clickhouse")
- // Cloud Spanner
- DBSystemSpanner = DBSystemKey.String("spanner")
- // Trino
- DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBClientConnectionsPoolName returns an attribute KeyValue conforming to
-// the "db.client.connections.pool.name" semantic conventions. It represents
-// the name of the connection pool; unique within the instrumented application.
-// In case the connection pool implementation doesn't provide a name,
-// instrumentation should use a combination of `server.address` and
-// `server.port` attributes formatted as `server.address:server.port`.
-func DBClientConnectionsPoolName(val string) attribute.KeyValue {
- return DBClientConnectionsPoolNameKey.String(val)
-}
-
-// DBCollectionName returns an attribute KeyValue conforming to the
-// "db.collection.name" semantic conventions. It represents the name of a
-// collection (table, container) within the database.
-func DBCollectionName(val string) attribute.KeyValue {
- return DBCollectionNameKey.String(val)
-}
-
-// DBNamespace returns an attribute KeyValue conforming to the
-// "db.namespace" semantic conventions. It represents the name of the database,
-// fully qualified within the server address and port.
-func DBNamespace(val string) attribute.KeyValue {
- return DBNamespaceKey.String(val)
-}
-
-// DBOperationName returns an attribute KeyValue conforming to the
-// "db.operation.name" semantic conventions. It represents the name of the
-// operation or command being executed.
-func DBOperationName(val string) attribute.KeyValue {
- return DBOperationNameKey.String(val)
-}
-
-// DBQueryText returns an attribute KeyValue conforming to the
-// "db.query.text" semantic conventions. It represents the database query being
-// executed.
-func DBQueryText(val string) attribute.KeyValue {
- return DBQueryTextKey.String(val)
-}
-
-// This group defines attributes for Cassandra.
-const (
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents the
- // whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// This group defines attributes for Azure Cosmos DB.
-const (
- // DBCosmosDBClientIDKey is the attribute Key conforming to the
- // "db.cosmosdb.client_id" semantic conventions. It represents the unique
- // Cosmos client instance id.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
- DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
-
- // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
- // "db.cosmosdb.connection_mode" semantic conventions. It represents the
- // cosmos client connection mode.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
-
- // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
- // "db.cosmosdb.operation_type" semantic conventions. It represents the
- // cosmosDB Operation Type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
-
- // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
- // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
- // consumed for that operation
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 46.18, 1.0
- DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-
- // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
- // "db.cosmosdb.request_content_length" semantic conventions. It represents
- // the request payload size in bytes
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
-
- // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
- // DB status code.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 200, 201
- DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
-
- // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
- // cosmos DB sub status code.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1000, 1002
- DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
-)
-
-var (
- // Gateway (HTTP) connections mode
- DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
- // Direct connection
- DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-var (
- // invalid
- DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
- // create
- DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
- // patch
- DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
- // read
- DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
- // read_feed
- DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
- // delete
- DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
- // replace
- DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
- // execute
- DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
- // query
- DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
- // head
- DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
- // head_feed
- DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
- // upsert
- DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
- // batch
- DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
- // query_plan
- DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
- // execute_javascript
- DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
- return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
-// consumed for that operation
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
- return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
- return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// This group defines attributes for Elasticsearch.
-const (
- // DBElasticsearchClusterNameKey is the attribute Key conforming to the
- // "db.elasticsearch.cluster.name" semantic conventions. It represents the
- // represents the identifier of an Elasticsearch cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
- DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
-
- // DBElasticsearchNodeNameKey is the attribute Key conforming to the
- // "db.elasticsearch.node.name" semantic conventions. It represents the
- // represents the human-readable identifier of the node/instance to which a
- // request was routed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'instance-0000000001'
- DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
-)
-
-// DBElasticsearchClusterName returns an attribute KeyValue conforming to
-// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
-// represents the identifier of an Elasticsearch cluster.
-func DBElasticsearchClusterName(val string) attribute.KeyValue {
- return DBElasticsearchClusterNameKey.String(val)
-}
-
-// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
-// "db.elasticsearch.node.name" semantic conventions. It represents the
-// represents the human-readable identifier of the node/instance to which a
-// request was routed.
-func DBElasticsearchNodeName(val string) attribute.KeyValue {
- return DBElasticsearchNodeNameKey.String(val)
-}
-
-// Attributes for software deployments.
-const (
- // DeploymentEnvironmentKey is the attribute Key conforming to the
- // "deployment.environment" semantic conventions. It represents the name of
- // the [deployment
- // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'staging', 'production'
- // Note: `deployment.environment` does not affect the uniqueness
- // constraints defined through
- // the `service.namespace`, `service.name` and `service.instance.id`
- // resource attributes.
- // This implies that resources carrying the following attribute
- // combinations MUST be
- // considered to be identifying the same service:
- //
- // * `service.name=frontend`, `deployment.environment=production`
- // * `service.name=frontend`, `deployment.environment=staging`.
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
-// (aka deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
- return DeploymentEnvironmentKey.String(val)
-}
-
-// Attributes that represents an occurrence of a lifecycle transition on the
-// Android platform.
-const (
- // AndroidStateKey is the attribute Key conforming to the "android.state"
- // semantic conventions. It represents the deprecated use the
- // `device.app.lifecycle` event definition including `android.state` as a
- // payload field instead.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The Android lifecycle states are defined in [Activity lifecycle
- // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
- // and from which the `OS identifiers` are derived.
- AndroidStateKey = attribute.Key("android.state")
-)
-
-var (
- // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
- AndroidStateCreated = AndroidStateKey.String("created")
- // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
- AndroidStateBackground = AndroidStateKey.String("background")
- // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
- AndroidStateForeground = AndroidStateKey.String("foreground")
-)
-
-// These attributes may be used to describe the receiver of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // DestinationAddressKey is the attribute Key conforming to the
- // "destination.address" semantic conventions. It represents the
- // destination address - domain name if available without reverse DNS
- // lookup; otherwise, IP address or Unix domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the source side, and when communicating through
- // an intermediary, `destination.address` SHOULD represent the destination
- // address behind any intermediaries, for example proxies, if it's
- // available.
- DestinationAddressKey = attribute.Key("destination.address")
-
- // DestinationPortKey is the attribute Key conforming to the
- // "destination.port" semantic conventions. It represents the destination
- // port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- DestinationPortKey = attribute.Key("destination.port")
-)
-
-// DestinationAddress returns an attribute KeyValue conforming to the
-// "destination.address" semantic conventions. It represents the destination
-// address - domain name if available without reverse DNS lookup; otherwise, IP
-// address or Unix domain socket name.
-func DestinationAddress(val string) attribute.KeyValue {
- return DestinationAddressKey.String(val)
-}
-
-// DestinationPort returns an attribute KeyValue conforming to the
-// "destination.port" semantic conventions. It represents the destination port
-// number
-func DestinationPort(val int) attribute.KeyValue {
- return DestinationPortKey.Int(val)
-}
-
-// Describes device attributes.
-const (
- // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
- // conventions. It represents a unique identifier representing the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values
- // outlined below. This value is not an advertising identifier and MUST NOT
- // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
- // to the [vendor
- // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
- // On Android (Java or Kotlin), this value MUST be equal to the Firebase
- // Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on
- // best practices and exact implementation details. Caution should be taken
- // when storing personal data or anything which can identify a user. GDPR
- // and data protection laws may apply, ensure you do your own due
- // diligence.
- DeviceIDKey = attribute.Key("device.id")
-
- // DeviceManufacturerKey is the attribute Key conforming to the
- // "device.manufacturer" semantic conventions. It represents the name of
- // the device manufacturer
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-
- // DeviceModelIdentifierKey is the attribute Key conforming to the
- // "device.model.identifier" semantic conventions. It represents the model
- // identifier for the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine-readable version
- // of the model identifier rather than the market or consumer-friendly name
- // of the device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
-
- // DeviceModelNameKey is the attribute Key conforming to the
- // "device.model.name" semantic conventions. It represents the marketing
- // name for the device model
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human-readable version of
- // the device model rather than a machine-readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
-)
-
-// DeviceID returns an attribute KeyValue conforming to the "device.id"
-// semantic conventions. It represents a unique identifier representing the
-// device
-func DeviceID(val string) attribute.KeyValue {
- return DeviceIDKey.String(val)
-}
-
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
- return DeviceManufacturerKey.String(val)
-}
-
-// DeviceModelIdentifier returns an attribute KeyValue conforming to the
-// "device.model.identifier" semantic conventions. It represents the model
-// identifier for the device
-func DeviceModelIdentifier(val string) attribute.KeyValue {
- return DeviceModelIdentifierKey.String(val)
-}
-
-// DeviceModelName returns an attribute KeyValue conforming to the
-// "device.model.name" semantic conventions. It represents the marketing name
-// for the device model
-func DeviceModelName(val string) attribute.KeyValue {
- return DeviceModelNameKey.String(val)
-}
-
-// These attributes may be used for any disk related operation.
-const (
- // DiskIoDirectionKey is the attribute Key conforming to the
- // "disk.io.direction" semantic conventions. It represents the disk IO
- // operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read'
- DiskIoDirectionKey = attribute.Key("disk.io.direction")
-)
-
-var (
- // read
- DiskIoDirectionRead = DiskIoDirectionKey.String("read")
- // write
- DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
-)
-
-// The shared attributes used to report a DNS query.
-const (
- // DNSQuestionNameKey is the attribute Key conforming to the
- // "dns.question.name" semantic conventions. It represents the name being
- // queried.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'www.example.com', 'opentelemetry.io'
- // Note: If the name field contains non-printable characters (below 32 or
- // above 126), those characters should be represented as escaped base 10
- // integers (\DDD). Back slashes and quotes should be escaped. Tabs,
- // carriage returns, and line feeds should be converted to \t, \r, and \n
- // respectively.
- DNSQuestionNameKey = attribute.Key("dns.question.name")
-)
-
-// DNSQuestionName returns an attribute KeyValue conforming to the
-// "dns.question.name" semantic conventions. It represents the name being
-// queried.
-func DNSQuestionName(val string) attribute.KeyValue {
- return DNSQuestionNameKey.String(val)
-}
-
-// Attributes for operations with an authenticated and/or authorized enduser.
-const (
- // EnduserIDKey is the attribute Key conforming to the "enduser.id"
- // semantic conventions. It represents the username or client_id extracted
- // from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
- // in the inbound request from outside the system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
-
- // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
- // semantic conventions. It represents the actual/assumed role the client
- // is making the request under extracted from token or application security
- // context.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
-
- // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
- // semantic conventions. It represents the scopes or granted authorities
- // the client currently possesses extracted from token or application
- // security context. The value would come from the scope associated with an
- // [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
- // value in a [SAML 2.0
- // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
- return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under extracted from token or application
-// security context.
-func EnduserRole(val string) attribute.KeyValue {
- return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses extracted from token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
-// The shared attributes used to report an error.
-const (
- // ErrorTypeKey is the attribute Key conforming to the "error.type"
- // semantic conventions. It represents the describes a class of error the
- // operation ended with.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'timeout', 'java.net.UnknownHostException',
- // 'server_certificate_invalid', '500'
- // Note: The `error.type` SHOULD be predictable, and SHOULD have low
- // cardinality.
- //
- // When `error.type` is set to a type (e.g., an exception type), its
- // canonical class name identifying the type within the artifact SHOULD be
- // used.
- //
- // Instrumentations SHOULD document the list of errors they report.
- //
- // The cardinality of `error.type` within one instrumentation library
- // SHOULD be low.
- // Telemetry consumers that aggregate data from multiple instrumentation
- // libraries and applications
- // should be prepared for `error.type` to have high cardinality at query
- // time when no
- // additional filters are applied.
- //
- // If the operation has completed successfully, instrumentations SHOULD NOT
- // set `error.type`.
- //
- // If a specific domain defines its own set of error identifiers (such as
- // HTTP or gRPC status codes),
- // it's RECOMMENDED to:
- //
- // * Use a domain-specific attribute
- // * Set `error.type` to capture all errors, regardless of whether they are
- // defined within the domain-specific set or not.
- ErrorTypeKey = attribute.Key("error.type")
-)
-
-var (
- // A fallback error value to be used when the instrumentation doesn't define a custom value
- ErrorTypeOther = ErrorTypeKey.String("_OTHER")
-)
-
-// Attributes for Events represented using Log Records.
-const (
- // EventNameKey is the attribute Key conforming to the "event.name"
- // semantic conventions. It represents the identifies the class / type of
- // event.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'browser.mouse.click', 'device.app.lifecycle'
- // Note: Event names are subject to the same rules as [attribute
- // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
- // Notably, event names are namespaced to avoid collisions and provide a
- // clean separation of semantics for events in separate domains like
- // browser, mobile, and kubernetes.
- EventNameKey = attribute.Key("event.name")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It represents the identifies the class / type of
-// event.
-func EventName(val string) attribute.KeyValue {
- return EventNameKey.String(val)
-}
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionEscapedKey is the attribute Key conforming to the
- // "exception.escaped" semantic conventions. It represents the sHOULD be
- // set to true if the exception event is recorded at a point where it is
- // known that the exception is escaping the scope of the span.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of
- // a span,
- // if that span is ended while the exception is still logically "in
- // flight".
- // This may be actually "in flight" in some languages (e.g. if the
- // exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most
- // languages.
- //
- // It is usually not possible to determine at the point where an exception
- // is thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending
- // the span,
- // as done in the [example for recording span
- // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
- //
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It represents the sHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
-// FaaS attributes
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It represents the
- // describes the type of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string, that will be potentially reused for other invocations to the
- // same function/function version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function converted to Bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 134217728
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run (Services):** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
- return FaaSColdstartKey.Bool(val)
-}
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
-// database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string, that will be potentially reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
-
-// Attributes for Feature Flags.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
- // "feature_flag.variant" semantic conventions. It represents the sHOULD be
- // a semantic identifier for a value. If one is unavailable, a stringified
- // version of the value can be used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
- // For example, the variant `red` maybe be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
-// semantic identifier for a value. If one is unavailable, a stringified
-// version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
-// Describes file attributes.
-const (
- // FileDirectoryKey is the attribute Key conforming to the "file.directory"
- // semantic conventions. It represents the directory where the file is
- // located. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/user', 'C:\\Program Files\\MyApp'
- FileDirectoryKey = attribute.Key("file.directory")
-
- // FileExtensionKey is the attribute Key conforming to the "file.extension"
- // semantic conventions. It represents the file extension, excluding the
- // leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: When the file name has multiple extensions (example.tar.gz), only
- // the last one should be captured ("gz", not "tar.gz").
- FileExtensionKey = attribute.Key("file.extension")
-
- // FileNameKey is the attribute Key conforming to the "file.name" semantic
- // conventions. It represents the name of the file including the extension,
- // without the directory.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.png'
- FileNameKey = attribute.Key("file.name")
-
- // FilePathKey is the attribute Key conforming to the "file.path" semantic
- // conventions. It represents the full path to the file, including the file
- // name. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/alice/example.png', 'C:\\Program
- // Files\\MyApp\\myapp.exe'
- FilePathKey = attribute.Key("file.path")
-
- // FileSizeKey is the attribute Key conforming to the "file.size" semantic
- // conventions. It represents the file size in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- FileSizeKey = attribute.Key("file.size")
-)
-
-// FileDirectory returns an attribute KeyValue conforming to the
-// "file.directory" semantic conventions. It represents the directory where the
-// file is located. It should include the drive letter, when appropriate.
-func FileDirectory(val string) attribute.KeyValue {
- return FileDirectoryKey.String(val)
-}
-
-// FileExtension returns an attribute KeyValue conforming to the
-// "file.extension" semantic conventions. It represents the file extension,
-// excluding the leading dot.
-func FileExtension(val string) attribute.KeyValue {
- return FileExtensionKey.String(val)
-}
-
-// FileName returns an attribute KeyValue conforming to the "file.name"
-// semantic conventions. It represents the name of the file including the
-// extension, without the directory.
-func FileName(val string) attribute.KeyValue {
- return FileNameKey.String(val)
-}
-
-// FilePath returns an attribute KeyValue conforming to the "file.path"
-// semantic conventions. It represents the full path to the file, including the
-// file name. It should include the drive letter, when appropriate.
-func FilePath(val string) attribute.KeyValue {
- return FilePathKey.String(val)
-}
-
-// FileSize returns an attribute KeyValue conforming to the "file.size"
-// semantic conventions. It represents the file size in bytes.
-func FileSize(val int) attribute.KeyValue {
- return FileSizeKey.Int(val)
-}
-
-// Attributes for Google Cloud Run.
-const (
- // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
- // "gcp.cloud_run.job.execution" semantic conventions. It represents the
- // name of the Cloud Run
- // [execution](https://cloud.google.com/run/docs/managing/job-executions)
- // being run for the Job, as set by the
- // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
- // environment variable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'job-name-xxxx', 'sample-job-mdw84'
- GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
-
- // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
- // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
- // index for a task within an execution as provided by the
- // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
- // environment variable.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1
- GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
-)
-
-// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
-// of the Cloud Run
-// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
-// run for the Job, as set by the
-// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobExecution(val string) attribute.KeyValue {
- return GCPCloudRunJobExecutionKey.String(val)
-}
-
-// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
-// for a task within an execution as provided by the
-// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
- return GCPCloudRunJobTaskIndexKey.Int(val)
-}
-
-// Attributes for Google Compute Engine (GCE).
-const (
- // GCPGceInstanceHostnameKey is the attribute Key conforming to the
- // "gcp.gce.instance.hostname" semantic conventions. It represents the
- // hostname of a GCE instance. This is the full value of the default or
- // [custom
- // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-host1234.example.com',
- // 'sample-vm.us-west1-b.c.my-project.internal'
- GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
-
- // GCPGceInstanceNameKey is the attribute Key conforming to the
- // "gcp.gce.instance.name" semantic conventions. It represents the instance
- // name of a GCE instance. This is the value provided by `host.name`, the
- // visible name of the instance in the Cloud Console UI, and the prefix for
- // the default hostname of the instance as defined by the [default internal
- // DNS
- // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'instance-1', 'my-vm-name'
- GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
-)
-
-// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
-// of a GCE instance. This is the full value of the default or [custom
-// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
-func GCPGceInstanceHostname(val string) attribute.KeyValue {
- return GCPGceInstanceHostnameKey.String(val)
-}
-
-// GCPGceInstanceName returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.name" semantic conventions. It represents the instance
-// name of a GCE instance. This is the value provided by `host.name`, the
-// visible name of the instance in the Cloud Console UI, and the prefix for the
-// default hostname of the instance as defined by the [default internal DNS
-// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
-func GCPGceInstanceName(val string) attribute.KeyValue {
- return GCPGceInstanceNameKey.String(val)
-}
-
-// The attributes used to describe telemetry in the context of LLM (Large
-// Language Models) requests and responses.
-const (
- // GenAiCompletionKey is the attribute Key conforming to the
- // "gen_ai.completion" semantic conventions. It represents the full
- // response received from the LLM.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: "[{'role': 'assistant', 'content': 'The capital of France is
- // Paris.'}]"
- // Note: It's RECOMMENDED to format completions as JSON string matching
- // [OpenAI messages
- // format](https://platform.openai.com/docs/guides/text-generation)
- GenAiCompletionKey = attribute.Key("gen_ai.completion")
-
- // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt"
- // semantic conventions. It represents the full prompt sent to an LLM.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: "[{'role': 'user', 'content': 'What is the capital of
- // France?'}]"
- // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI
- // messages
- // format](https://platform.openai.com/docs/guides/text-generation)
- GenAiPromptKey = attribute.Key("gen_ai.prompt")
-
- // GenAiRequestMaxTokensKey is the attribute Key conforming to the
- // "gen_ai.request.max_tokens" semantic conventions. It represents the
- // maximum number of tokens the LLM generates for a request.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
-
- // GenAiRequestModelKey is the attribute Key conforming to the
- // "gen_ai.request.model" semantic conventions. It represents the name of
- // the LLM a request is being made to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gpt-4'
- GenAiRequestModelKey = attribute.Key("gen_ai.request.model")
-
- // GenAiRequestTemperatureKey is the attribute Key conforming to the
- // "gen_ai.request.temperature" semantic conventions. It represents the
- // temperature setting for the LLM request.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0.0
- GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
-
- // GenAiRequestTopPKey is the attribute Key conforming to the
- // "gen_ai.request.top_p" semantic conventions. It represents the top_p
- // sampling setting for the LLM request.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0
- GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p")
-
- // GenAiResponseFinishReasonsKey is the attribute Key conforming to the
- // "gen_ai.response.finish_reasons" semantic conventions. It represents the
- // array of reasons the model stopped generating tokens, corresponding to
- // each generation received.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'stop'
- GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
-
- // GenAiResponseIDKey is the attribute Key conforming to the
- // "gen_ai.response.id" semantic conventions. It represents the unique
- // identifier for the completion.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'chatcmpl-123'
- GenAiResponseIDKey = attribute.Key("gen_ai.response.id")
-
- // GenAiResponseModelKey is the attribute Key conforming to the
- // "gen_ai.response.model" semantic conventions. It represents the name of
- // the LLM a response was generated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gpt-4-0613'
- GenAiResponseModelKey = attribute.Key("gen_ai.response.model")
-
- // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system"
- // semantic conventions. It represents the Generative AI product as
- // identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'openai'
- // Note: The actual GenAI product may differ from the one identified by the
- // client. For example, when using OpenAI client libraries to communicate
- // with Mistral, the `gen_ai.system` is set to `openai` based on the
- // instrumentation's best knowledge.
- GenAiSystemKey = attribute.Key("gen_ai.system")
-
- // GenAiUsageCompletionTokensKey is the attribute Key conforming to the
- // "gen_ai.usage.completion_tokens" semantic conventions. It represents the
- // number of tokens used in the LLM response (completion).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 180
- GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens")
-
- // GenAiUsagePromptTokensKey is the attribute Key conforming to the
- // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the
- // number of tokens used in the LLM prompt.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens")
-)
-
-var (
- // OpenAI
- GenAiSystemOpenai = GenAiSystemKey.String("openai")
-)
-
-// GenAiCompletion returns an attribute KeyValue conforming to the
-// "gen_ai.completion" semantic conventions. It represents the full response
-// received from the LLM.
-func GenAiCompletion(val string) attribute.KeyValue {
- return GenAiCompletionKey.String(val)
-}
-
-// GenAiPrompt returns an attribute KeyValue conforming to the
-// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to
-// an LLM.
-func GenAiPrompt(val string) attribute.KeyValue {
- return GenAiPromptKey.String(val)
-}
-
-// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the
-// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
-// number of tokens the LLM generates for a request.
-func GenAiRequestMaxTokens(val int) attribute.KeyValue {
- return GenAiRequestMaxTokensKey.Int(val)
-}
-
-// GenAiRequestModel returns an attribute KeyValue conforming to the
-// "gen_ai.request.model" semantic conventions. It represents the name of the
-// LLM a request is being made to.
-func GenAiRequestModel(val string) attribute.KeyValue {
- return GenAiRequestModelKey.String(val)
-}
-
-// GenAiRequestTemperature returns an attribute KeyValue conforming to the
-// "gen_ai.request.temperature" semantic conventions. It represents the
-// temperature setting for the LLM request.
-func GenAiRequestTemperature(val float64) attribute.KeyValue {
- return GenAiRequestTemperatureKey.Float64(val)
-}
-
-// GenAiRequestTopP returns an attribute KeyValue conforming to the
-// "gen_ai.request.top_p" semantic conventions. It represents the top_p
-// sampling setting for the LLM request.
-func GenAiRequestTopP(val float64) attribute.KeyValue {
- return GenAiRequestTopPKey.Float64(val)
-}
-
-// GenAiResponseFinishReasons returns an attribute KeyValue conforming to
-// the "gen_ai.response.finish_reasons" semantic conventions. It represents the
-// array of reasons the model stopped generating tokens, corresponding to each
-// generation received.
-func GenAiResponseFinishReasons(val ...string) attribute.KeyValue {
- return GenAiResponseFinishReasonsKey.StringSlice(val)
-}
-
-// GenAiResponseID returns an attribute KeyValue conforming to the
-// "gen_ai.response.id" semantic conventions. It represents the unique
-// identifier for the completion.
-func GenAiResponseID(val string) attribute.KeyValue {
- return GenAiResponseIDKey.String(val)
-}
-
-// GenAiResponseModel returns an attribute KeyValue conforming to the
-// "gen_ai.response.model" semantic conventions. It represents the name of the
-// LLM a response was generated from.
-func GenAiResponseModel(val string) attribute.KeyValue {
- return GenAiResponseModelKey.String(val)
-}
-
-// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
-// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
-// number of tokens used in the LLM response (completion).
-func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
- return GenAiUsageCompletionTokensKey.Int(val)
-}
-
-// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
-// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
-// of tokens used in the LLM prompt.
-func GenAiUsagePromptTokens(val int) attribute.KeyValue {
- return GenAiUsagePromptTokensKey.Int(val)
-}
-
-// Attributes for GraphQL.
-const (
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
-
-// Attributes for the Android platform on which the Android application is
-// running.
-const (
- // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
- // semantic conventions. It represents the unique identifier for the
- // application
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
- HerokuAppIDKey = attribute.Key("heroku.app.id")
-
- // HerokuReleaseCommitKey is the attribute Key conforming to the
- // "heroku.release.commit" semantic conventions. It represents the commit
- // hash for the current release
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
- HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
- // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
- // "heroku.release.creation_timestamp" semantic conventions. It represents
- // the time and date the release was created
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2022-10-23T18:00:42Z'
- HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-)
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
- return HerokuAppIDKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
- return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
- return HerokuReleaseCreationTimestampKey.String(val)
-}
-
-// A host is defined as a computing instance. For example, physical servers,
-// virtual machines, switches or disk array.
-const (
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- HostArchKey = attribute.Key("host.arch")
-
- // HostCPUCacheL2SizeKey is the attribute Key conforming to the
- // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
- // of level 2 memory cache available to the processor (in Bytes).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12288000
- HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
-
- // HostCPUFamilyKey is the attribute Key conforming to the
- // "host.cpu.family" semantic conventions. It represents the family or
- // generation of the CPU.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', 'PA-RISC 1.1e'
- HostCPUFamilyKey = attribute.Key("host.cpu.family")
-
- // HostCPUModelIDKey is the attribute Key conforming to the
- // "host.cpu.model.id" semantic conventions. It represents the model
- // identifier. It provides more granular information about the CPU,
- // distinguishing it from other CPUs within the same family.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', '9000/778/B180L'
- HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
-
- // HostCPUModelNameKey is the attribute Key conforming to the
- // "host.cpu.model.name" semantic conventions. It represents the model
- // designation of the processor.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
- HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
-
- // HostCPUSteppingKey is the attribute Key conforming to the
- // "host.cpu.stepping" semantic conventions. It represents the stepping or
- // core revisions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1', 'r1p1'
- HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
-
- // HostCPUVendorIDKey is the attribute Key conforming to the
- // "host.cpu.vendor.id" semantic conventions. It represents the processor
- // manufacturer identifier. A maximum 12-character string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'GenuineIntel'
- // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
- // ID string in EBX, EDX and ECX registers. Writing these to memory in this
- // order results in a 12-character string.
- HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
-
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the vM image ID or host OS image ID.
- // For Cloud, this value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image or host OS as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-
- // HostIPKey is the attribute Key conforming to the "host.ip" semantic
- // conventions. It represents the available IP addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
- // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
- // addresses MUST be specified in the [RFC
- // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
- HostIPKey = attribute.Key("host.ip")
-
- // HostMacKey is the attribute Key conforming to the "host.mac" semantic
- // conventions. It represents the available MAC addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
- // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
- // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
- // as hyphen-separated octets in uppercase hexadecimal form from most to
- // least significant.
- HostMacKey = attribute.Key("host.mac")
-
- // HostNameKey is the attribute Key conforming to the "host.name" semantic
- // conventions. It represents the name of the host. On Unix systems, it may
- // contain what the hostname command returns, or the fully qualified
- // hostname, or another name specified by the user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
-
- // HostTypeKey is the attribute Key conforming to the "host.type" semantic
- // conventions. It represents the type of host. For Cloud, this must be the
- // machine type.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
-
-// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
-// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
-// level 2 memory cache available to the processor (in Bytes).
-func HostCPUCacheL2Size(val int) attribute.KeyValue {
- return HostCPUCacheL2SizeKey.Int(val)
-}
-
-// HostCPUFamily returns an attribute KeyValue conforming to the
-// "host.cpu.family" semantic conventions. It represents the family or
-// generation of the CPU.
-func HostCPUFamily(val string) attribute.KeyValue {
- return HostCPUFamilyKey.String(val)
-}
-
-// HostCPUModelID returns an attribute KeyValue conforming to the
-// "host.cpu.model.id" semantic conventions. It represents the model
-// identifier. It provides more granular information about the CPU,
-// distinguishing it from other CPUs within the same family.
-func HostCPUModelID(val string) attribute.KeyValue {
- return HostCPUModelIDKey.String(val)
-}
-
-// HostCPUModelName returns an attribute KeyValue conforming to the
-// "host.cpu.model.name" semantic conventions. It represents the model
-// designation of the processor.
-func HostCPUModelName(val string) attribute.KeyValue {
- return HostCPUModelNameKey.String(val)
-}
-
-// HostCPUStepping returns an attribute KeyValue conforming to the
-// "host.cpu.stepping" semantic conventions. It represents the stepping or core
-// revisions.
-func HostCPUStepping(val string) attribute.KeyValue {
- return HostCPUSteppingKey.String(val)
-}
-
-// HostCPUVendorID returns an attribute KeyValue conforming to the
-// "host.cpu.vendor.id" semantic conventions. It represents the processor
-// manufacturer identifier. A maximum 12-character string.
-func HostCPUVendorID(val string) attribute.KeyValue {
- return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the vM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
- return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
- return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
-// Semantic convention attributes in the HTTP namespace.
-const (
- // HTTPConnectionStateKey is the attribute Key conforming to the
- // "http.connection.state" semantic conventions. It represents the state of
- // the HTTP connection in the HTTP connection pool.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'active', 'idle'
- HTTPConnectionStateKey = attribute.Key("http.connection.state")
-
- // HTTPRequestBodySizeKey is the attribute Key conforming to the
- // "http.request.body.size" semantic conventions. It represents the size of
- // the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
- // HTTPRequestMethodKey is the attribute Key conforming to the
- // "http.request.method" semantic conventions. It represents the hTTP
- // request method.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- // Note: HTTP request method value SHOULD be "known" to the
- // instrumentation.
- // By default, this convention defines "known" methods as the ones listed
- // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
- // and the PATCH method defined in
- // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
- //
- // If the HTTP request method is not known to instrumentation, it MUST set
- // the `http.request.method` attribute to `_OTHER`.
- //
- // If the HTTP instrumentation could end up converting valid HTTP request
- // methods to `_OTHER`, then it MUST provide a way to override
- // the list of known HTTP methods. If this override is done via environment
- // variable, then the environment variable MUST be named
- // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
- // list of case-sensitive known HTTP methods
- // (this list MUST be a full override of the default known method, it is
- // not a list of known methods in addition to the defaults).
- //
- // HTTP method names are case-sensitive and `http.request.method` attribute
- // value MUST match a known HTTP method name exactly.
- // Instrumentations for specific web frameworks that consider HTTP methods
- // to be case insensitive, SHOULD populate a canonical equivalent.
- // Tracing instrumentations that do so, MUST also set
- // `http.request.method_original` to the original value.
- HTTPRequestMethodKey = attribute.Key("http.request.method")
-
- // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
- // "http.request.method_original" semantic conventions. It represents the
- // original HTTP method sent by the client in the request line.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GeT', 'ACL', 'foo'
- HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
- // HTTPRequestResendCountKey is the attribute Key conforming to the
- // "http.request.resend_count" semantic conventions. It represents the
- // ordinal number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g. redirection, authorization failure, 503 Server Unavailable,
- // network issues, or any other).
- HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
-
- // HTTPRequestSizeKey is the attribute Key conforming to the
- // "http.request.size" semantic conventions. It represents the total size
- // of the request in bytes. This should be the total number of bytes sent
- // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
- // and HTTP/3), headers, and request body if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPRequestSizeKey = attribute.Key("http.request.size")
-
- // HTTPResponseBodySizeKey is the attribute Key conforming to the
- // "http.response.body.size" semantic conventions. It represents the size
- // of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
-
- // HTTPResponseSizeKey is the attribute Key conforming to the
- // "http.response.size" semantic conventions. It represents the total size
- // of the response in bytes. This should be the total number of bytes sent
- // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
- // HTTP/3), headers, and response body and trailers if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPResponseSizeKey = attribute.Key("http.response.size")
-
- // HTTPResponseStatusCodeKey is the attribute Key conforming to the
- // "http.response.status_code" semantic conventions. It represents the
- // [HTTP response status
- // code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 200
- HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route, that is, the path
- // template in the format used by the respective server framework.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework as the route attribute should have low-cardinality and
- // the URI path can NOT substitute it.
- // SHOULD include the [application
- // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
- HTTPRouteKey = attribute.Key("http.route")
-)
-
-var (
- // active state
- HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
- // idle state
- HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
-)
-
-var (
- // CONNECT method
- HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
- // DELETE method
- HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
- // GET method
- HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
- // HEAD method
- HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
- // OPTIONS method
- HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
- // PATCH method
- HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
- // POST method
- HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
- // PUT method
- HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
- // TRACE method
- HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
- // Any HTTP method that the instrumentation has no prior knowledge of
- HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
-)
-
-// HTTPRequestBodySize returns an attribute KeyValue conforming to the
-// "http.request.body.size" semantic conventions. It represents the size of the
-// request payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestBodySize(val int) attribute.KeyValue {
- return HTTPRequestBodySizeKey.Int(val)
-}
-
-// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
-// "http.request.method_original" semantic conventions. It represents the
-// original HTTP method sent by the client in the request line.
-func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
- return HTTPRequestMethodOriginalKey.String(val)
-}
-
-// HTTPRequestResendCount returns an attribute KeyValue conforming to the
-// "http.request.resend_count" semantic conventions. It represents the ordinal
-// number of request resending attempt (for any reason, including redirects).
-func HTTPRequestResendCount(val int) attribute.KeyValue {
- return HTTPRequestResendCountKey.Int(val)
-}
-
-// HTTPRequestSize returns an attribute KeyValue conforming to the
-// "http.request.size" semantic conventions. It represents the total size of
-// the request in bytes. This should be the total number of bytes sent over the
-// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
-// headers, and request body if any.
-func HTTPRequestSize(val int) attribute.KeyValue {
- return HTTPRequestSizeKey.Int(val)
-}
-
-// HTTPResponseBodySize returns an attribute KeyValue conforming to the
-// "http.response.body.size" semantic conventions. It represents the size of
-// the response payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseBodySize(val int) attribute.KeyValue {
- return HTTPResponseBodySizeKey.Int(val)
-}
-
-// HTTPResponseSize returns an attribute KeyValue conforming to the
-// "http.response.size" semantic conventions. It represents the total size of
-// the response in bytes. This should be the total number of bytes sent over
-// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
-// headers, and response body and trailers if any.
-func HTTPResponseSize(val int) attribute.KeyValue {
- return HTTPResponseSizeKey.Int(val)
-}
-
-// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
-// "http.response.status_code" semantic conventions. It represents the [HTTP
-// response status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPResponseStatusCode(val int) attribute.KeyValue {
- return HTTPResponseStatusCodeKey.Int(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route, that is, the path
-// template in the format used by the respective server framework.
-func HTTPRoute(val string) attribute.KeyValue {
- return HTTPRouteKey.String(val)
-}
-
-// Java Virtual machine related attributes.
-const (
- // JvmBufferPoolNameKey is the attribute Key conforming to the
- // "jvm.buffer.pool.name" semantic conventions. It represents the name of
- // the buffer pool.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mapped', 'direct'
- // Note: Pool names are generally obtained via
- // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
- JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
-
- // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
- // semantic conventions. It represents the name of the garbage collector
- // action.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'end of minor GC', 'end of major GC'
- // Note: Garbage collector action is generally obtained via
- // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
- JvmGcActionKey = attribute.Key("jvm.gc.action")
-
- // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
- // semantic conventions. It represents the name of the garbage collector.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'G1 Young Generation', 'G1 Old Generation'
- // Note: Garbage collector name is generally obtained via
- // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
- JvmGcNameKey = attribute.Key("jvm.gc.name")
-
- // JvmMemoryPoolNameKey is the attribute Key conforming to the
- // "jvm.memory.pool.name" semantic conventions. It represents the name of
- // the memory pool.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
- // Note: Pool names are generally obtained via
- // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
- JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
-
- // JvmMemoryTypeKey is the attribute Key conforming to the
- // "jvm.memory.type" semantic conventions. It represents the type of
- // memory.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'heap', 'non_heap'
- JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
-
- // JvmThreadDaemonKey is the attribute Key conforming to the
- // "jvm.thread.daemon" semantic conventions. It represents the whether the
- // thread is daemon or not.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
-
- // JvmThreadStateKey is the attribute Key conforming to the
- // "jvm.thread.state" semantic conventions. It represents the state of the
- // thread.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'runnable', 'blocked'
- JvmThreadStateKey = attribute.Key("jvm.thread.state")
-)
-
-var (
- // Heap memory
- JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
- // Non-heap memory
- JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
-)
-
-var (
- // A thread that has not yet started is in this state
- JvmThreadStateNew = JvmThreadStateKey.String("new")
- // A thread executing in the Java virtual machine is in this state
- JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
- // A thread that is blocked waiting for a monitor lock is in this state
- JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
- // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
- JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
- // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
- JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
- // A thread that has exited is in this state
- JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
-)
-
-// JvmBufferPoolName returns an attribute KeyValue conforming to the
-// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
-// buffer pool.
-func JvmBufferPoolName(val string) attribute.KeyValue {
- return JvmBufferPoolNameKey.String(val)
-}
-
-// JvmGcAction returns an attribute KeyValue conforming to the
-// "jvm.gc.action" semantic conventions. It represents the name of the garbage
-// collector action.
-func JvmGcAction(val string) attribute.KeyValue {
- return JvmGcActionKey.String(val)
-}
-
-// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
-// semantic conventions. It represents the name of the garbage collector.
-func JvmGcName(val string) attribute.KeyValue {
- return JvmGcNameKey.String(val)
-}
-
-// JvmMemoryPoolName returns an attribute KeyValue conforming to the
-// "jvm.memory.pool.name" semantic conventions. It represents the name of the
-// memory pool.
-func JvmMemoryPoolName(val string) attribute.KeyValue {
- return JvmMemoryPoolNameKey.String(val)
-}
-
-// JvmThreadDaemon returns an attribute KeyValue conforming to the
-// "jvm.thread.daemon" semantic conventions. It represents the whether the
-// thread is daemon or not.
-func JvmThreadDaemon(val bool) attribute.KeyValue {
- return JvmThreadDaemonKey.Bool(val)
-}
-
-// Kubernetes resource attributes.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-
- // K8SClusterUIDKey is the attribute Key conforming to the
- // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
- // the cluster, set to the UID of the `kube-system` namespace.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
- // Note: K8S doesn't have support for obtaining a cluster ID. If this is
- // ever
- // added, we will recommend collecting the `k8s.cluster.uid` through the
- // official APIs. In the meantime, we are able to use the `uid` of the
- // `kube-system` namespace as a proxy for cluster ID. Read on for the
- // rationale.
- //
- // Every object created in a K8S cluster is assigned a distinct UID. The
- // `kube-system` namespace is used by Kubernetes itself and will exist
- // for the lifetime of the cluster. Using the `uid` of the `kube-system`
- // namespace is a reasonable proxy for the K8S ClusterID as it will only
- // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
- // UUIDs as standardized by
- // [ISO/IEC 9834-8 and ITU-T
- // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
- // Which states:
- //
- // > If generated according to one of the mechanisms defined in Rec.
- // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
- // different from all other UUIDs generated before 3603 A.D., or is
- // extremely likely to be different (depending on the mechanism chosen).
- //
- // Therefore, UIDs between clusters should be extremely unlikely to
- // conflict.
- K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
-
- // K8SContainerNameKey is the attribute Key conforming to the
- // "k8s.container.name" semantic conventions. It represents the name of the
- // Container from Pod specification, must be unique within a Pod. Container
- // runtime usually uses different globally unique name (`container.name`).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
-
- // K8SContainerRestartCountKey is the attribute Key conforming to the
- // "k8s.container.restart_count" semantic conventions. It represents the
- // number of times the container was restarted. This attribute can be used
- // to identify a particular container (running or stopped) within a
- // container spec.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-
- // K8SContainerStatusLastTerminatedReasonKey is the attribute Key
- // conforming to the "k8s.container.status.last_terminated_reason" semantic
- // conventions. It represents the last terminated reason of the Container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Evicted', 'Error'
- K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
-
- // K8SCronJobNameKey is the attribute Key conforming to the
- // "k8s.cronjob.name" semantic conventions. It represents the name of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-
- // K8SCronJobUIDKey is the attribute Key conforming to the
- // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
- // K8SDaemonSetNameKey is the attribute Key conforming to the
- // "k8s.daemonset.name" semantic conventions. It represents the name of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-
- // K8SDaemonSetUIDKey is the attribute Key conforming to the
- // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
- // K8SDeploymentNameKey is the attribute Key conforming to the
- // "k8s.deployment.name" semantic conventions. It represents the name of
- // the Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-
- // K8SDeploymentUIDKey is the attribute Key conforming to the
- // "k8s.deployment.uid" semantic conventions. It represents the UID of the
- // Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
- // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
- // semantic conventions. It represents the name of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-
- // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
- // semantic conventions. It represents the UID of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
- // K8SNamespaceNameKey is the attribute Key conforming to the
- // "k8s.namespace.name" semantic conventions. It represents the name of the
- // namespace that the pod is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-
- // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
- // semantic conventions. It represents the name of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
-
- // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
- // semantic conventions. It represents the UID of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-
- // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
- // semantic conventions. It represents the name of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-
- // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
- // semantic conventions. It represents the UID of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
- // K8SReplicaSetNameKey is the attribute Key conforming to the
- // "k8s.replicaset.name" semantic conventions. It represents the name of
- // the ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-
- // K8SReplicaSetUIDKey is the attribute Key conforming to the
- // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
- // ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
- // K8SStatefulSetNameKey is the attribute Key conforming to the
- // "k8s.statefulset.name" semantic conventions. It represents the name of
- // the StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-
- // K8SStatefulSetUIDKey is the attribute Key conforming to the
- // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
- // StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
- return K8SClusterNameKey.String(val)
-}
-
-// K8SClusterUID returns an attribute KeyValue conforming to the
-// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
-// cluster, set to the UID of the `kube-system` namespace.
-func K8SClusterUID(val string) attribute.KeyValue {
- return K8SClusterUIDKey.String(val)
-}
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from Pod specification, must be unique within a Pod. Container
-// runtime usually uses different globally unique name (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
- return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
- return K8SContainerRestartCountKey.Int(val)
-}
-
-// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
-// conforming to the "k8s.container.status.last_terminated_reason" semantic
-// conventions. It represents the last terminated reason of the Container.
-func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
- return K8SContainerStatusLastTerminatedReasonKey.String(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
- return K8SCronJobNameKey.String(val)
-}
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
- return K8SCronJobUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
- return K8SDaemonSetNameKey.String(val)
-}
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
- return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
- return K8SDeploymentNameKey.String(val)
-}
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
- return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
- return K8SJobNameKey.String(val)
-}
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
- return K8SJobUIDKey.String(val)
-}
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
- return K8SNamespaceNameKey.String(val)
-}
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
- return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
- return K8SNodeUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
- return K8SPodNameKey.String(val)
-}
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
- return K8SPodUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
- return K8SReplicaSetNameKey.String(val)
-}
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
- return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
- return K8SStatefulSetNameKey.String(val)
-}
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
- return K8SStatefulSetUIDKey.String(val)
-}
-
-// Log attributes
-const (
- // LogIostreamKey is the attribute Key conforming to the "log.iostream"
- // semantic conventions. It represents the stream associated with the log.
- // See below for a list of well-known values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- LogIostreamKey = attribute.Key("log.iostream")
-)
-
-var (
- // Logs from stdout stream
- LogIostreamStdout = LogIostreamKey.String("stdout")
- // Events from stderr stream
- LogIostreamStderr = LogIostreamKey.String("stderr")
-)
-
-// Attributes for a file to which log was emitted.
-const (
- // LogFileNameKey is the attribute Key conforming to the "log.file.name"
- // semantic conventions. It represents the basename of the file.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'audit.log'
- LogFileNameKey = attribute.Key("log.file.name")
-
- // LogFileNameResolvedKey is the attribute Key conforming to the
- // "log.file.name_resolved" semantic conventions. It represents the
- // basename of the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'uuid.log'
- LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
-
- // LogFilePathKey is the attribute Key conforming to the "log.file.path"
- // semantic conventions. It represents the full path to the file.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/var/log/mysql/audit.log'
- LogFilePathKey = attribute.Key("log.file.path")
-
- // LogFilePathResolvedKey is the attribute Key conforming to the
- // "log.file.path_resolved" semantic conventions. It represents the full
- // path to the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/var/lib/docker/uuid.log'
- LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
-)
-
-// LogFileName returns an attribute KeyValue conforming to the
-// "log.file.name" semantic conventions. It represents the basename of the
-// file.
-func LogFileName(val string) attribute.KeyValue {
- return LogFileNameKey.String(val)
-}
-
-// LogFileNameResolved returns an attribute KeyValue conforming to the
-// "log.file.name_resolved" semantic conventions. It represents the basename of
-// the file, with symlinks resolved.
-func LogFileNameResolved(val string) attribute.KeyValue {
- return LogFileNameResolvedKey.String(val)
-}
-
-// LogFilePath returns an attribute KeyValue conforming to the
-// "log.file.path" semantic conventions. It represents the full path to the
-// file.
-func LogFilePath(val string) attribute.KeyValue {
- return LogFilePathKey.String(val)
-}
-
-// LogFilePathResolved returns an attribute KeyValue conforming to the
-// "log.file.path_resolved" semantic conventions. It represents the full path
-// to the file, with symlinks resolved.
-func LogFilePathResolved(val string) attribute.KeyValue {
- return LogFilePathResolvedKey.String(val)
-}
-
-// The generic attributes that may be used in any Log Record.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means, that two
- // distinguishable log records MUST have different values.
- // The id MAY be an [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
-
-// Attributes describing telemetry around messaging systems and messaging
-// activities.
-const (
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message API for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-
- // MessagingClientIDKey is the attribute Key conforming to the
- // "messaging.client.id" semantic conventions. It represents a unique
- // identifier for the client that consumes or produces a message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'client-5', 'myhost@8742@s8083jm'
- MessagingClientIDKey = attribute.Key("messaging.client.id")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. It represents a
- // boolean that is true if the message destination is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-
- // MessagingDestinationNameKey is the attribute Key conforming to the
- // "messaging.destination.name" semantic conventions. It represents the
- // message destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Destination name SHOULD uniquely identify a specific queue, topic
- // or other entity within the broker. If
- // the broker doesn't have such notion, the destination name SHOULD
- // uniquely identify the broker.
- MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
- // MessagingDestinationPartitionIDKey is the attribute Key conforming to
- // the "messaging.destination.partition.id" semantic conventions. It
- // represents the identifier of the partition messages are sent to or
- // received from, unique within the `messaging.destination.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1'
- MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
-
- // MessagingDestinationTemplateKey is the attribute Key conforming to the
- // "messaging.destination.template" semantic conventions. It represents the
- // low cardinality representation of the messaging destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/customers/{customerID}'
- // Note: Destination names could be constructed from templates. An example
- // would be a destination name involving a user name or product id.
- // Although the destination name in this case is of high cardinality, the
- // underlying template is of low cardinality and can be effectively used
- // for grouping and aggregation.
- MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
- // MessagingDestinationTemporaryKey is the attribute Key conforming to the
- // "messaging.destination.temporary" semantic conventions. It represents a
- // boolean that is true if the message destination is temporary and might
- // not exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
- // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
- // to the "messaging.destination_publish.anonymous" semantic conventions.
- // It represents a boolean that is true if the publish message destination
- // is anonymous (could be unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
-
- // MessagingDestinationPublishNameKey is the attribute Key conforming to
- // the "messaging.destination_publish.name" semantic conventions. It
- // represents the name of the original destination the message was
- // published to
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: The name SHOULD uniquely identify a specific queue, topic, or
- // other entity within the broker. If
- // the broker doesn't have such notion, the original destination name
- // SHOULD uniquely identify the broker.
- MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
-
- // MessagingMessageBodySizeKey is the attribute Key conforming to the
- // "messaging.message.body.size" semantic conventions. It represents the
- // size of the message body in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1439
- // Note: This can refer to both the compressed or uncompressed body size.
- // If both sizes are known, the uncompressed
- // body size should be used.
- MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
-
- // MessagingMessageConversationIDKey is the attribute Key conforming to the
- // "messaging.message.conversation_id" semantic conventions. It represents
- // the conversation ID identifying the conversation to which the message
- // belongs, represented as a string. Sometimes called "Correlation ID".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyConversationID'
- MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
- // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
- // "messaging.message.envelope.size" semantic conventions. It represents
- // the size of the message body and metadata in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2738
- // Note: This can refer to both the compressed or uncompressed size. If
- // both sizes are known, the uncompressed
- // size should be used.
- MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
-
- // MessagingMessageIDKey is the attribute Key conforming to the
- // "messaging.message.id" semantic conventions. It represents a value used
- // by the messaging system as an identifier for the message, represented as
- // a string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
- // MessagingOperationNameKey is the attribute Key conforming to the
- // "messaging.operation.name" semantic conventions. It represents the
- // system-specific name of the messaging operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ack', 'nack', 'send'
- MessagingOperationNameKey = attribute.Key("messaging.operation.name")
-
- // MessagingOperationTypeKey is the attribute Key conforming to the
- // "messaging.operation.type" semantic conventions. It represents a string
- // identifying the type of the messaging operation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: If a custom value is used, it MUST be of low cardinality.
- MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
-
- // MessagingSystemKey is the attribute Key conforming to the
- // "messaging.system" semantic conventions. It represents the messaging
- // system as identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The actual messaging system may differ from the one known by the
- // client. For example, when using Kafka client libraries to communicate
- // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on
- // the instrumentation's best knowledge.
- MessagingSystemKey = attribute.Key("messaging.system")
-)
-
-var (
- // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
- MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
- // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
- MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
- // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
- MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
- // One or more messages are delivered to or processed by a consumer
- MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process")
- // One or more messages are settled
- MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
-)
-
-var (
- // Apache ActiveMQ
- MessagingSystemActivemq = MessagingSystemKey.String("activemq")
- // Amazon Simple Queue Service (SQS)
- MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
- // Azure Event Grid
- MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid")
- // Azure Event Hubs
- MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs")
- // Azure Service Bus
- MessagingSystemServicebus = MessagingSystemKey.String("servicebus")
- // Google Cloud Pub/Sub
- MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
- // Java Message Service
- MessagingSystemJms = MessagingSystemKey.String("jms")
- // Apache Kafka
- MessagingSystemKafka = MessagingSystemKey.String("kafka")
- // RabbitMQ
- MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
- // Apache RocketMQ
- MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
-)
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
- return MessagingBatchMessageCountKey.Int(val)
-}
-
-// MessagingClientID returns an attribute KeyValue conforming to the
-// "messaging.client.id" semantic conventions. It represents a unique
-// identifier for the client that consumes or produces a message.
-func MessagingClientID(val string) attribute.KeyValue {
- return MessagingClientIDKey.String(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name
-func MessagingDestinationName(val string) attribute.KeyValue {
- return MessagingDestinationNameKey.String(val)
-}
-
-// MessagingDestinationPartitionID returns an attribute KeyValue conforming
-// to the "messaging.destination.partition.id" semantic conventions. It
-// represents the identifier of the partition messages are sent to or received
-// from, unique within the `messaging.destination.name`.
-func MessagingDestinationPartitionID(val string) attribute.KeyValue {
- return MessagingDestinationPartitionIDKey.String(val)
-}
-
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
- return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
- return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationPublishAnonymous returns an attribute KeyValue
-// conforming to the "messaging.destination_publish.anonymous" semantic
-// conventions. It represents a boolean that is true if the publish message
-// destination is anonymous (could be unnamed or have auto-generated name).
-func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationPublishAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationPublishName returns an attribute KeyValue conforming
-// to the "messaging.destination_publish.name" semantic conventions. It
-// represents the name of the original destination the message was published to
-func MessagingDestinationPublishName(val string) attribute.KeyValue {
- return MessagingDestinationPublishNameKey.String(val)
-}
-
-// MessagingMessageBodySize returns an attribute KeyValue conforming to the
-// "messaging.message.body.size" semantic conventions. It represents the size
-// of the message body in bytes.
-func MessagingMessageBodySize(val int) attribute.KeyValue {
- return MessagingMessageBodySizeKey.Int(val)
-}
-
-// MessagingMessageConversationID returns an attribute KeyValue conforming
-// to the "messaging.message.conversation_id" semantic conventions. It
-// represents the conversation ID identifying the conversation to which the
-// message belongs, represented as a string. Sometimes called "Correlation ID".
-func MessagingMessageConversationID(val string) attribute.KeyValue {
- return MessagingMessageConversationIDKey.String(val)
-}
-
-// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
-// the "messaging.message.envelope.size" semantic conventions. It represents
-// the size of the message body and metadata in bytes.
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
- return MessagingMessageEnvelopeSizeKey.Int(val)
-}
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
- return MessagingMessageIDKey.String(val)
-}
-
-// MessagingOperationName returns an attribute KeyValue conforming to the
-// "messaging.operation.name" semantic conventions. It represents the
-// system-specific name of the messaging operation.
-func MessagingOperationName(val string) attribute.KeyValue {
- return MessagingOperationNameKey.String(val)
-}
-
-// This group describes attributes specific to Apache Kafka.
-const (
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
- // "messaging.kafka.message.key" semantic conventions. It represents the
- // message keys in Kafka are used for grouping alike messages to ensure
- // they're processed on the same partition. They differ from
- // `messaging.message.id` in that they're not unique. If the key is `null`,
- // the attribute MUST NOT be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- // Note: If the key type is not string, it's string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-)
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message keys in Kafka are used for grouping alike messages to ensure they're
-// processed on the same partition. They differ from `messaging.message.id` in
-// that they're not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
- return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
- return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
-// This group describes attributes specific to RabbitMQ.
-const (
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
- // conventions. It represents the rabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-
- // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
- // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
- // It represents the rabbitMQ message delivery tag
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 123
- MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
-)
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the rabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
- return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
-// conventions. It represents the rabbitMQ message delivery tag
-func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
- return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
-}
-
-// This group describes attributes specific to RocketMQ.
-const (
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-
- // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
- // conventions. It represents the delay time level for delay message, which
- // determines the message delay time.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3
- MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
- // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delivery_timestamp"
- // semantic conventions. It represents the timestamp in milliseconds that
- // the delay message is expected to be delivered to consumer.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1665987217045
- MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
- // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.group" semantic conventions. It represents
- // the it is essential for FIFO message. Messages that belong to the same
- // message group are always processed one by one within the same consumer
- // group.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myMessageGroup'
- MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
- // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.keys" semantic conventions. It represents
- // the key(s) of message, another way to mark message besides message id.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
- // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.tag" semantic conventions. It represents the
- // secondary classifier of message besides topic.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
- // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.type" semantic conventions. It represents
- // the type of message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
- // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
- // "messaging.rocketmq.namespace" semantic conventions. It represents the
- // namespace of RocketMQ resources, resources in different namespaces are
- // individual.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.client_group" semantic conventions. It represents
-// the name of the RocketMQ producer/consumer group that is handling the
-// message. The client type is identified by the SpanKind.
-func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
- return MessagingRocketmqClientGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-// conventions. It represents the delay time level for delay message, which
-// determines the message delay time.
-func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
-}
-
-// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
-// conventions. It represents the timestamp in milliseconds that the delay
-// message is expected to be delivered to consumer.
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
-}
-
-// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.group" semantic conventions. It represents
-// the it is essential for FIFO message. Messages that belong to the same
-// message group are always processed one by one within the same consumer
-// group.
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
- return MessagingRocketmqMessageGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.keys" semantic conventions. It represents
-// the key(s) of message, another way to mark message besides message id.
-func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
- return MessagingRocketmqMessageKeysKey.StringSlice(val)
-}
-
-// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
-// secondary classifier of message besides topic.
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
- return MessagingRocketmqMessageTagKey.String(val)
-}
-
-// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.namespace" semantic conventions. It represents the
-// namespace of RocketMQ resources, resources in different namespaces are
-// individual.
-func MessagingRocketmqNamespace(val string) attribute.KeyValue {
- return MessagingRocketmqNamespaceKey.String(val)
-}
-
-// This group describes attributes specific to GCP Pub/Sub.
-const (
- // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
- // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
- // It represents the ack deadline in seconds set for the modify ack
- // deadline request.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
-
- // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
- // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
- // represents the ack id for a given message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ack_id'
- MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
-
- // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
- // conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
- // semantic conventions. It represents the delivery attempt for a given
- // message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2
- MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
-
- // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
- // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
- // It represents the ordering key for a given message. If the attribute is
- // not present, the message does not have an ordering key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ordering_key'
- MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
-)
-
-// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
-// conventions. It represents the ack deadline in seconds set for the modify
-// ack deadline request.
-func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
- return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
-}
-
-// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
-// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
-// represents the ack id for a given message.
-func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
- return MessagingGCPPubsubMessageAckIDKey.String(val)
-}
-
-// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
-// conventions. It represents the delivery attempt for a given message.
-func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
- return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
-}
-
-// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
-// conventions. It represents the ordering key for a given message. If the
-// attribute is not present, the message does not have an ordering key.
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
- return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
-}
-
-// This group describes attributes specific to Azure Service Bus.
-const (
- // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
- // conforming to the "messaging.servicebus.destination.subscription_name"
- // semantic conventions. It represents the name of the subscription in the
- // topic messages are received from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mySubscription'
- MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
-
- // MessagingServicebusDispositionStatusKey is the attribute Key conforming
- // to the "messaging.servicebus.disposition_status" semantic conventions.
- // It represents the describes the [settlement
- // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
-
- // MessagingServicebusMessageDeliveryCountKey is the attribute Key
- // conforming to the "messaging.servicebus.message.delivery_count" semantic
- // conventions. It represents the number of deliveries that have been
- // attempted for this message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2
- MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
-
- // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
- // conforming to the "messaging.servicebus.message.enqueued_time" semantic
- // conventions. It represents the UTC epoch seconds at which the message
- // has been accepted and stored in the entity.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1701393730
- MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
-)
-
-var (
- // Message is completed
- MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
- // Message is abandoned
- MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
- // Message is sent to dead letter queue
- MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
- // Message is deferred
- MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
-)
-
-// MessagingServicebusDestinationSubscriptionName returns an attribute
-// KeyValue conforming to the
-// "messaging.servicebus.destination.subscription_name" semantic conventions.
-// It represents the name of the subscription in the topic messages are
-// received from.
-func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
- return MessagingServicebusDestinationSubscriptionNameKey.String(val)
-}
-
-// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.delivery_count" semantic
-// conventions. It represents the number of deliveries that have been attempted
-// for this message.
-func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
- return MessagingServicebusMessageDeliveryCountKey.Int(val)
-}
-
-// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.enqueued_time" semantic
-// conventions. It represents the UTC epoch seconds at which the message has
-// been accepted and stored in the entity.
-func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
- return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
-}
-
-// This group describes attributes specific to Azure Event Hubs.
-const (
- // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
- // the "messaging.eventhubs.consumer.group" semantic conventions. It
- // represents the name of the consumer group the event consumer is
- // associated with.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'indexer'
- MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
-
- // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
- // to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
- // It represents the UTC epoch seconds at which the message has been
- // accepted and stored in the entity.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1701393730
- MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
-)
-
-// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
-// to the "messaging.eventhubs.consumer.group" semantic conventions. It
-// represents the name of the consumer group the event consumer is associated
-// with.
-func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
- return MessagingEventhubsConsumerGroupKey.String(val)
-}
-
-// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
-// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
-// conventions. It represents the UTC epoch seconds at which the message has
-// been accepted and stored in the entity.
-func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
- return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetworkCarrierIccKey is the attribute Key conforming to the
- // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
- // alpha-2 2-character country code associated with the mobile carrier
- // network.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'DE'
- NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
-
- // NetworkCarrierMccKey is the attribute Key conforming to the
- // "network.carrier.mcc" semantic conventions. It represents the mobile
- // carrier country code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '310'
- NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
-
- // NetworkCarrierMncKey is the attribute Key conforming to the
- // "network.carrier.mnc" semantic conventions. It represents the mobile
- // carrier network code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '001'
- NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
-
- // NetworkCarrierNameKey is the attribute Key conforming to the
- // "network.carrier.name" semantic conventions. It represents the name of
- // the mobile carrier.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'sprint'
- NetworkCarrierNameKey = attribute.Key("network.carrier.name")
-
- // NetworkConnectionSubtypeKey is the attribute Key conforming to the
- // "network.connection.subtype" semantic conventions. It represents the
- // this describes more details regarding the connection.type. It may be the
- // type of cell technology connection, but it could be used for describing
- // details about a wifi connection.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'LTE'
- NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
-
- // NetworkConnectionTypeKey is the attribute Key conforming to the
- // "network.connection.type" semantic conventions. It represents the
- // internet connection type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'wifi'
- NetworkConnectionTypeKey = attribute.Key("network.connection.type")
-
- // NetworkIoDirectionKey is the attribute Key conforming to the
- // "network.io.direction" semantic conventions. It represents the network
- // IO operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'transmit'
- NetworkIoDirectionKey = attribute.Key("network.io.direction")
-
- // NetworkLocalAddressKey is the attribute Key conforming to the
- // "network.local.address" semantic conventions. It represents the local
- // address of the network connection - IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10.1.2.80', '/tmp/my.sock'
- NetworkLocalAddressKey = attribute.Key("network.local.address")
-
- // NetworkLocalPortKey is the attribute Key conforming to the
- // "network.local.port" semantic conventions. It represents the local port
- // number of the network connection.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- NetworkLocalPortKey = attribute.Key("network.local.port")
-
- // NetworkPeerAddressKey is the attribute Key conforming to the
- // "network.peer.address" semantic conventions. It represents the peer
- // address of the network connection - IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10.1.2.80', '/tmp/my.sock'
- NetworkPeerAddressKey = attribute.Key("network.peer.address")
-
- // NetworkPeerPortKey is the attribute Key conforming to the
- // "network.peer.port" semantic conventions. It represents the peer port
- // number of the network connection.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- NetworkPeerPortKey = attribute.Key("network.peer.port")
-
- // NetworkProtocolNameKey is the attribute Key conforming to the
- // "network.protocol.name" semantic conventions. It represents the [OSI
- // application layer](https://osi-model.com/application-layer/) or non-OSI
- // equivalent.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'amqp', 'http', 'mqtt'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkProtocolNameKey = attribute.Key("network.protocol.name")
-
- // NetworkProtocolVersionKey is the attribute Key conforming to the
- // "network.protocol.version" semantic conventions. It represents the
- // actual version of the protocol used for network communication.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.1', '2'
- // Note: If protocol version is subject to negotiation (for example using
- // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute
- // SHOULD be set to the negotiated version. If the actual protocol version
- // is not known, this attribute SHOULD NOT be set.
- NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
-
- // NetworkTransportKey is the attribute Key conforming to the
- // "network.transport" semantic conventions. It represents the [OSI
- // transport layer](https://osi-model.com/transport-layer/) or
- // [inter-process communication
- // method](https://wikipedia.org/wiki/Inter-process_communication).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'tcp', 'udp'
- // Note: The value SHOULD be normalized to lowercase.
- //
- // Consider always setting the transport when setting a port number, since
- // a port number is ambiguous without knowing the transport. For example
- // different processes could be listening on TCP port 12345 and UDP port
- // 12345.
- NetworkTransportKey = attribute.Key("network.transport")
-
- // NetworkTypeKey is the attribute Key conforming to the "network.type"
- // semantic conventions. It represents the [OSI network
- // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ipv4', 'ipv6'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkTypeKey = attribute.Key("network.type")
-)
-
-var (
- // GPRS
- NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
- // EDGE
- NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
- // UMTS
- NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
- // CDMA
- NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
- // IDEN
- NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
- // EHRPD
- NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
- // GSM
- NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
-)
-
-var (
- // wifi
- NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
- // wired
- NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
- // cell
- NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
- // unavailable
- NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
- // unknown
- NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
-)
-
-var (
- // transmit
- NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
- // receive
- NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
-)
-
-var (
- // TCP
- NetworkTransportTCP = NetworkTransportKey.String("tcp")
- // UDP
- NetworkTransportUDP = NetworkTransportKey.String("udp")
- // Named or anonymous pipe
- NetworkTransportPipe = NetworkTransportKey.String("pipe")
- // Unix domain socket
- NetworkTransportUnix = NetworkTransportKey.String("unix")
-)
-
-var (
- // IPv4
- NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
- // IPv6
- NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
-)
-
-// NetworkCarrierIcc returns an attribute KeyValue conforming to the
-// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetworkCarrierIcc(val string) attribute.KeyValue {
- return NetworkCarrierIccKey.String(val)
-}
-
-// NetworkCarrierMcc returns an attribute KeyValue conforming to the
-// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
-// country code.
-func NetworkCarrierMcc(val string) attribute.KeyValue {
- return NetworkCarrierMccKey.String(val)
-}
-
-// NetworkCarrierMnc returns an attribute KeyValue conforming to the
-// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
-// network code.
-func NetworkCarrierMnc(val string) attribute.KeyValue {
- return NetworkCarrierMncKey.String(val)
-}
-
-// NetworkCarrierName returns an attribute KeyValue conforming to the
-// "network.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetworkCarrierName(val string) attribute.KeyValue {
- return NetworkCarrierNameKey.String(val)
-}
-
-// NetworkLocalAddress returns an attribute KeyValue conforming to the
-// "network.local.address" semantic conventions. It represents the local
-// address of the network connection - IP address or Unix domain socket name.
-func NetworkLocalAddress(val string) attribute.KeyValue {
- return NetworkLocalAddressKey.String(val)
-}
-
-// NetworkLocalPort returns an attribute KeyValue conforming to the
-// "network.local.port" semantic conventions. It represents the local port
-// number of the network connection.
-func NetworkLocalPort(val int) attribute.KeyValue {
- return NetworkLocalPortKey.Int(val)
-}
-
-// NetworkPeerAddress returns an attribute KeyValue conforming to the
-// "network.peer.address" semantic conventions. It represents the peer address
-// of the network connection - IP address or Unix domain socket name.
-func NetworkPeerAddress(val string) attribute.KeyValue {
- return NetworkPeerAddressKey.String(val)
-}
-
-// NetworkPeerPort returns an attribute KeyValue conforming to the
-// "network.peer.port" semantic conventions. It represents the peer port number
-// of the network connection.
-func NetworkPeerPort(val int) attribute.KeyValue {
- return NetworkPeerPortKey.Int(val)
-}
-
-// NetworkProtocolName returns an attribute KeyValue conforming to the
-// "network.protocol.name" semantic conventions. It represents the [OSI
-// application layer](https://osi-model.com/application-layer/) or non-OSI
-// equivalent.
-func NetworkProtocolName(val string) attribute.KeyValue {
- return NetworkProtocolNameKey.String(val)
-}
-
-// NetworkProtocolVersion returns an attribute KeyValue conforming to the
-// "network.protocol.version" semantic conventions. It represents the actual
-// version of the protocol used for network communication.
-func NetworkProtocolVersion(val string) attribute.KeyValue {
- return NetworkProtocolVersionKey.String(val)
-}
-
-// An OCI image manifest.
-const (
- // OciManifestDigestKey is the attribute Key conforming to the
- // "oci.manifest.digest" semantic conventions. It represents the digest of
- // the OCI image manifest. For container images specifically is the digest
- // by which the container image is known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
- // Note: Follows [OCI Image Manifest
- // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
- // and specifically the [Digest
- // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
- // An example can be found in [Example Image
- // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
- OciManifestDigestKey = attribute.Key("oci.manifest.digest")
-)
-
-// OciManifestDigest returns an attribute KeyValue conforming to the
-// "oci.manifest.digest" semantic conventions. It represents the digest of the
-// OCI image manifest. For container images specifically is the digest by which
-// the container image is known.
-func OciManifestDigest(val string) attribute.KeyValue {
- return OciManifestDigestKey.String(val)
-}
-
-// Attributes used by the OpenTracing Shim layer.
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span doesn't depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
- // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
- // semantic conventions. It represents the unique identifier for a
- // particular build or compilation of the operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
- OSBuildIDKey = attribute.Key("os.build_id")
-
- // OSDescriptionKey is the attribute Key conforming to the "os.description"
- // semantic conventions. It represents the human readable (not intended to
- // be parsed) OS version information, like e.g. reported by `ver` or
- // `lsb_release -a` commands.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
- // LTS'
- OSDescriptionKey = attribute.Key("os.description")
-
- // OSNameKey is the attribute Key conforming to the "os.name" semantic
- // conventions. It represents the human readable operating system name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
-
- // OSTypeKey is the attribute Key conforming to the "os.type" semantic
- // conventions. It represents the operating system type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- OSTypeKey = attribute.Key("os.type")
-
- // OSVersionKey is the attribute Key conforming to the "os.version"
- // semantic conventions. It represents the version string of the operating
- // system as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
-// semantic conventions. It represents the unique identifier for a particular
-// build or compilation of the operating system.
-func OSBuildID(val string) attribute.KeyValue {
- return OSBuildIDKey.String(val)
-}
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, like e.g. reported by `ver`
-// or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
- return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
- return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
- return OSVersionKey.String(val)
-}
-
-// Attributes reserved for OpenTelemetry
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OTelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OTelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0.0'
- OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
- return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
- return OTelScopeVersionKey.String(val)
-}
-
-// Operations that access some remote service.
-const (
- // PeerServiceKey is the attribute Key conforming to the "peer.service"
- // semantic conventions. It represents the
- // [`service.name`](/docs/resource/README.md#service) of the remote
- // service. SHOULD be equal to the actual `service.name` resource attribute
- // of the remote service if any.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](/docs/resource/README.md#service) of the remote service.
-// SHOULD be equal to the actual `service.name` resource attribute of the
-// remote service if any.
-func PeerService(val string) attribute.KeyValue {
- return PeerServiceKey.String(val)
-}
-
-// An operating system process.
-const (
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
- // "process.command_args" semantic conventions. It represents the all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'cmd/otecol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
- // "process.command_line" semantic conventions. It represents the full
- // command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessContextSwitchTypeKey is the attribute Key conforming to the
- // "process.context_switch_type" semantic conventions. It represents the
- // specifies whether the context switches for this data point were
- // voluntary or involuntary.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
-
- // ProcessCreationTimeKey is the attribute Key conforming to the
- // "process.creation.time" semantic conventions. It represents the date and
- // time the process was created, in ISO 8601 format.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2023-11-21T09:25:34.853Z'
- ProcessCreationTimeKey = attribute.Key("process.creation.time")
-
- // ProcessExecutableNameKey is the attribute Key conforming to the
- // "process.executable.name" semantic conventions. It represents the name
- // of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
- // of `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
- // ProcessExecutablePathKey is the attribute Key conforming to the
- // "process.executable.path" semantic conventions. It represents the full
- // path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessExitCodeKey is the attribute Key conforming to the
- // "process.exit.code" semantic conventions. It represents the exit code of
- // the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 127
- ProcessExitCodeKey = attribute.Key("process.exit.code")
-
- // ProcessExitTimeKey is the attribute Key conforming to the
- // "process.exit.time" semantic conventions. It represents the date and
- // time the process exited, in ISO 8601 format.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2023-11-21T09:26:12.315Z'
- ProcessExitTimeKey = attribute.Key("process.exit.time")
-
- // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
- // "process.group_leader.pid" semantic conventions. It represents the PID
- // of the process's group leader. This is also the process group ID (PGID)
- // of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 23
- ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
-
- // ProcessInteractiveKey is the attribute Key conforming to the
- // "process.interactive" semantic conventions. It represents the whether
- // the process is connected to an interactive shell.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessInteractiveKey = attribute.Key("process.interactive")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-
- // ProcessPagingFaultTypeKey is the attribute Key conforming to the
- // "process.paging.fault_type" semantic conventions. It represents the type
- // of page fault for this data point. Type `major` is for major/hard page
- // faults, and `minor` is for minor/soft page faults.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
-
- // ProcessParentPIDKey is the attribute Key conforming to the
- // "process.parent_pid" semantic conventions. It represents the parent
- // Process identifier (PPID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 111
- ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
- // ProcessPIDKey is the attribute Key conforming to the "process.pid"
- // semantic conventions. It represents the process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
-
- // ProcessRealUserIDKey is the attribute Key conforming to the
- // "process.real_user.id" semantic conventions. It represents the real user
- // ID (RUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1000
- ProcessRealUserIDKey = attribute.Key("process.real_user.id")
-
- // ProcessRealUserNameKey is the attribute Key conforming to the
- // "process.real_user.name" semantic conventions. It represents the
- // username of the real user of the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'operator'
- ProcessRealUserNameKey = attribute.Key("process.real_user.name")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-
- // ProcessSavedUserIDKey is the attribute Key conforming to the
- // "process.saved_user.id" semantic conventions. It represents the saved
- // user ID (SUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1002
- ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
-
- // ProcessSavedUserNameKey is the attribute Key conforming to the
- // "process.saved_user.name" semantic conventions. It represents the
- // username of the saved user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'operator'
- ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
-
- // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
- // "process.session_leader.pid" semantic conventions. It represents the PID
- // of the process's session leader. This is also the session ID (SID) of
- // the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 14
- ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
-
- // ProcessUserIDKey is the attribute Key conforming to the
- // "process.user.id" semantic conventions. It represents the effective user
- // ID (EUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1001
- ProcessUserIDKey = attribute.Key("process.user.id")
-
- // ProcessUserNameKey is the attribute Key conforming to the
- // "process.user.name" semantic conventions. It represents the username of
- // the effective user of the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'root'
- ProcessUserNameKey = attribute.Key("process.user.name")
-
- // ProcessVpidKey is the attribute Key conforming to the "process.vpid"
- // semantic conventions. It represents the virtual process identifier.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12
- // Note: The process ID within a PID namespace. This is not necessarily
- // unique across all processes on the host but it is unique within the
- // process namespace that the process exists within.
- ProcessVpidKey = attribute.Key("process.vpid")
-)
-
-var (
- // voluntary
- ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
- // involuntary
- ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
-)
-
-var (
- // major
- ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
- // minor
- ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
-)
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCreationTime returns an attribute KeyValue conforming to the
-// "process.creation.time" semantic conventions. It represents the date and
-// time the process was created, in ISO 8601 format.
-func ProcessCreationTime(val string) attribute.KeyValue {
- return ProcessCreationTimeKey.String(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessExitCode returns an attribute KeyValue conforming to the
-// "process.exit.code" semantic conventions. It represents the exit code of the
-// process.
-func ProcessExitCode(val int) attribute.KeyValue {
- return ProcessExitCodeKey.Int(val)
-}
-
-// ProcessExitTime returns an attribute KeyValue conforming to the
-// "process.exit.time" semantic conventions. It represents the date and time
-// the process exited, in ISO 8601 format.
-func ProcessExitTime(val string) attribute.KeyValue {
- return ProcessExitTimeKey.String(val)
-}
-
-// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
-// "process.group_leader.pid" semantic conventions. It represents the PID of
-// the process's group leader. This is also the process group ID (PGID) of the
-// process.
-func ProcessGroupLeaderPID(val int) attribute.KeyValue {
- return ProcessGroupLeaderPIDKey.Int(val)
-}
-
-// ProcessInteractive returns an attribute KeyValue conforming to the
-// "process.interactive" semantic conventions. It represents the whether the
-// process is connected to an interactive shell.
-func ProcessInteractive(val bool) attribute.KeyValue {
- return ProcessInteractiveKey.Bool(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PPID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessRealUserID returns an attribute KeyValue conforming to the
-// "process.real_user.id" semantic conventions. It represents the real user ID
-// (RUID) of the process.
-func ProcessRealUserID(val int) attribute.KeyValue {
- return ProcessRealUserIDKey.Int(val)
-}
-
-// ProcessRealUserName returns an attribute KeyValue conforming to the
-// "process.real_user.name" semantic conventions. It represents the username of
-// the real user of the process.
-func ProcessRealUserName(val string) attribute.KeyValue {
- return ProcessRealUserNameKey.String(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
- return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
- return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
- return ProcessRuntimeVersionKey.String(val)
-}
-
-// ProcessSavedUserID returns an attribute KeyValue conforming to the
-// "process.saved_user.id" semantic conventions. It represents the saved user
-// ID (SUID) of the process.
-func ProcessSavedUserID(val int) attribute.KeyValue {
- return ProcessSavedUserIDKey.Int(val)
-}
-
-// ProcessSavedUserName returns an attribute KeyValue conforming to the
-// "process.saved_user.name" semantic conventions. It represents the username
-// of the saved user.
-func ProcessSavedUserName(val string) attribute.KeyValue {
- return ProcessSavedUserNameKey.String(val)
-}
-
-// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
-// "process.session_leader.pid" semantic conventions. It represents the PID of
-// the process's session leader. This is also the session ID (SID) of the
-// process.
-func ProcessSessionLeaderPID(val int) attribute.KeyValue {
- return ProcessSessionLeaderPIDKey.Int(val)
-}
-
-// ProcessUserID returns an attribute KeyValue conforming to the
-// "process.user.id" semantic conventions. It represents the effective user ID
-// (EUID) of the process.
-func ProcessUserID(val int) attribute.KeyValue {
- return ProcessUserIDKey.Int(val)
-}
-
-// ProcessUserName returns an attribute KeyValue conforming to the
-// "process.user.name" semantic conventions. It represents the username of the
-// effective user of the process.
-func ProcessUserName(val string) attribute.KeyValue {
- return ProcessUserNameKey.String(val)
-}
-
-// ProcessVpid returns an attribute KeyValue conforming to the
-// "process.vpid" semantic conventions. It represents the virtual process
-// identifier.
-func ProcessVpid(val int) attribute.KeyValue {
- return ProcessVpidKey.Int(val)
-}
-
-// Attributes for process CPU
-const (
- // ProcessCPUStateKey is the attribute Key conforming to the
- // "process.cpu.state" semantic conventions. It represents the CPU state of
- // the process.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessCPUStateKey = attribute.Key("process.cpu.state")
-)
-
-var (
- // system
- ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
- // user
- ProcessCPUStateUser = ProcessCPUStateKey.String("user")
- // wait
- ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
-)
-
-// Attributes for remote procedure calls.
-const (
- // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
- // "rpc.connect_rpc.error_code" semantic conventions. It represents the
- // [error codes](https://connect.build/docs/protocol/#error-codes) of the
- // Connect request. Error codes are always string values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-
- // RPCGRPCStatusCodeKey is the attribute Key conforming to the
- // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
- // status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
- // the gRPC request.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-
- // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_code" semantic conventions. It represents the
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
- // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_message" semantic conventions. It represents the
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-
- // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
- // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
- // property of request or response. Since protocol allows id to be int,
- // string, `null` or missing (for notifications), value is expected to be
- // cast to string for simplicity. Use empty string in case of `null` value.
- // Omit entirely if this is a notification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
- // RPCJsonrpcVersionKey is the attribute Key conforming to the
- // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
- // doesn't specify this, the value can be omitted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
- // RPCMessageCompressedSizeKey is the attribute Key conforming to the
- // "rpc.message.compressed_size" semantic conventions. It represents the
- // compressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
-
- // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
- // semantic conventions. It represents the mUST be calculated as two
- // different counters starting from `1` one for sent messages and one for
- // received message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- RPCMessageIDKey = attribute.Key("rpc.message.id")
-
- // RPCMessageTypeKey is the attribute Key conforming to the
- // "rpc.message.type" semantic conventions. It represents the whether this
- // is a received or sent message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageTypeKey = attribute.Key("rpc.message.type")
-
- // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
- // "rpc.message.uncompressed_size" semantic conventions. It represents the
- // uncompressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
-
- // RPCMethodKey is the attribute Key conforming to the "rpc.method"
- // semantic conventions. It represents the name of the (logical) method
- // being called, must be equal to the $method part in the span name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the
- // latter (e.g., method actually executing the call on the server side, RPC
- // client stub method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-
- // RPCServiceKey is the attribute Key conforming to the "rpc.service"
- // semantic conventions. It represents the full (logical) name of the
- // service being called, including its package name, if applicable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing
- // class. The `code.namespace` attribute may be used to store the latter
- // (despite the attribute name, it may include a class name; e.g., class
- // with method actually executing the call on the server side, RPC client
- // stub class on the client side).
- RPCServiceKey = attribute.Key("rpc.service")
-
- // RPCSystemKey is the attribute Key conforming to the "rpc.system"
- // semantic conventions. It represents a string identifying the remoting
- // system. See below for a list of well-known identifiers.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCSystemKey = attribute.Key("rpc.system")
-)
-
-var (
- // cancelled
- RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
- // unknown
- RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
- // invalid_argument
- RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
- // deadline_exceeded
- RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
- // not_found
- RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
- // already_exists
- RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
- // permission_denied
- RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
- // resource_exhausted
- RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
- // failed_precondition
- RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
- // aborted
- RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
- // out_of_range
- RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
- // unimplemented
- RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
- // internal
- RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
- // unavailable
- RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
- // data_loss
- RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
- // unauthenticated
- RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-var (
- // sent
- RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
- // received
- RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
- // Connect RPC
- RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
- return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
- return RPCJsonrpcErrorMessageKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of request or response. Since protocol allows id to be int, string,
-// `null` or missing (for notifications), value is expected to be cast to
-// string for simplicity. Use empty string in case of `null` value. Omit
-// entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
- return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-// doesn't specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
- return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
-// "rpc.message.compressed_size" semantic conventions. It represents the
-// compressed size of the message in bytes.
-func RPCMessageCompressedSize(val int) attribute.KeyValue {
- return RPCMessageCompressedSizeKey.Int(val)
-}
-
-// RPCMessageID returns an attribute KeyValue conforming to the
-// "rpc.message.id" semantic conventions. It represents the mUST be calculated
-// as two different counters starting from `1` one for sent messages and one
-// for received message.
-func RPCMessageID(val int) attribute.KeyValue {
- return RPCMessageIDKey.Int(val)
-}
-
-// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
-// the "rpc.message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func RPCMessageUncompressedSize(val int) attribute.KeyValue {
- return RPCMessageUncompressedSizeKey.Int(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called, must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
- return RPCMethodKey.String(val)
-}
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
- return RPCServiceKey.String(val)
-}
-
-// These attributes may be used to describe the server in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ServerAddressKey is the attribute Key conforming to the "server.address"
- // semantic conventions. It represents the server domain name if available
- // without reverse DNS lookup; otherwise, IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.address` SHOULD represent the server address
- // behind any intermediaries, for example proxies, if it's available.
- ServerAddressKey = attribute.Key("server.address")
-
- // ServerPortKey is the attribute Key conforming to the "server.port"
- // semantic conventions. It represents the server port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 80, 8080, 443
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.port` SHOULD represent the server port behind
- // any intermediaries, for example proxies, if it's available.
- ServerPortKey = attribute.Key("server.port")
-)
-
-// ServerAddress returns an attribute KeyValue conforming to the
-// "server.address" semantic conventions. It represents the server domain name
-// if available without reverse DNS lookup; otherwise, IP address or Unix
-// domain socket name.
-func ServerAddress(val string) attribute.KeyValue {
- return ServerAddressKey.String(val)
-}
-
-// ServerPort returns an attribute KeyValue conforming to the "server.port"
-// semantic conventions. It represents the server port number.
-func ServerPort(val int) attribute.KeyValue {
- return ServerPortKey.Int(val)
-}
-
-// A service instance.
-const (
- // ServiceInstanceIDKey is the attribute Key conforming to the
- // "service.instance.id" semantic conventions. It represents the string ID
- // of the service instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words
- // `service.namespace,service.name,service.instance.id` triplet MUST be
- // globally unique). The ID helps to
- // distinguish instances of the same service that exist at the same time
- // (e.g. instances of a horizontally scaled
- // service).
- //
- // Implementations, such as SDKs, are recommended to generate a random
- // Version 1 or Version 4 [RFC
- // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an
- // inherent unique ID as the source of
- // this value if stability is desirable. In that case, the ID SHOULD be
- // used as source of a UUID Version 5 and
- // SHOULD use the following UUID as the namespace:
- // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
- //
- // UUIDs are typically recommended, as only an opaque value for the
- // purposes of identifying a service instance is
- // needed. Similar to what can be seen in the man page for the
- // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
- // file, the underlying
- // data, such as pod name and namespace should be treated as confidential,
- // being the user's choice to expose it
- // or not via another resource attribute.
- //
- // For applications running behind an application server (like unicorn), we
- // do not recommend using one identifier
- // for all processes participating in the application. Instead, it's
- // recommended each division (e.g. a worker
- // thread in unicorn) to have its own instance.id.
- //
- // It's not recommended for a Collector to set `service.instance.id` if it
- // can't unambiguously determine the
- // service instance that is generating that telemetry. For instance,
- // creating an UUID based on `pod.name` will
- // likely be wrong, as the Collector might not know from which container
- // within that pod the telemetry originated.
- // However, Collectors can set the `service.instance.id` if they can
- // unambiguously determine the service instance
- // for that telemetry. This is typically the case for scraping receivers,
- // as they know the target address and
- // port.
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
- // ServiceNameKey is the attribute Key conforming to the "service.name"
- // semantic conventions. It represents the logical name of the service.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled
- // services. If the value was not specified, SDKs MUST fallback to
- // `unknown_service:` concatenated with
- // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
- // `process.executable.name` is not available, the value MUST be set to
- // `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
-
- // ServiceNamespaceKey is the attribute Key conforming to the
- // "service.namespace" semantic conventions. It represents a namespace for
- // `service.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group
- // of services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name`
- // is expected to be unique for all services that have no explicit
- // namespace defined (so the empty/unspecified namespace is simply one more
- // valid namespace). Zero-length namespace string is assumed equal to
- // unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
-
- // ServiceVersionKey is the attribute Key conforming to the
- // "service.version" semantic conventions. It represents the version string
- // of the service API or implementation. The format is not defined by these
- // conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2.0.0', 'a01dbef8a'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
- return ServiceInstanceIDKey.String(val)
-}
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
- return ServiceNameKey.String(val)
-}
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
- return ServiceNamespaceKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation. The format is not defined by these
-// conventions.
-func ServiceVersion(val string) attribute.KeyValue {
- return ServiceVersionKey.String(val)
-}
-
-// Session is defined as the period of time encompassing all activities
-// performed by the application and the actions executed by the end user.
-// Consequently, a Session is represented as a collection of Logs, Events, and
-// Spans emitted by the Client Application throughout the Session's duration.
-// Each Session is assigned a unique identifier, which is included as an
-// attribute in the Logs, Events, and Spans generated during the Session's
-// lifecycle.
-// When a session reaches end of life, typically due to user inactivity or
-// session timeout, a new session identifier will be assigned. The previous
-// session identifier may be provided by the instrumentation so that telemetry
-// backends can link the two sessions.
-const (
- // SessionIDKey is the attribute Key conforming to the "session.id"
- // semantic conventions. It represents a unique id to identify a session.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionIDKey = attribute.Key("session.id")
-
- // SessionPreviousIDKey is the attribute Key conforming to the
- // "session.previous_id" semantic conventions. It represents the previous
- // `session.id` for this user, when known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionPreviousIDKey = attribute.Key("session.previous_id")
-)
-
-// SessionID returns an attribute KeyValue conforming to the "session.id"
-// semantic conventions. It represents a unique id to identify a session.
-func SessionID(val string) attribute.KeyValue {
- return SessionIDKey.String(val)
-}
-
-// SessionPreviousID returns an attribute KeyValue conforming to the
-// "session.previous_id" semantic conventions. It represents the previous
-// `session.id` for this user, when known.
-func SessionPreviousID(val string) attribute.KeyValue {
- return SessionPreviousIDKey.String(val)
-}
-
-// SignalR attributes
-const (
- // SignalrConnectionStatusKey is the attribute Key conforming to the
- // "signalr.connection.status" semantic conventions. It represents the
- // signalR HTTP connection closure status.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'app_shutdown', 'timeout'
- SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
-
- // SignalrTransportKey is the attribute Key conforming to the
- // "signalr.transport" semantic conventions. It represents the [SignalR
- // transport
- // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'web_sockets', 'long_polling'
- SignalrTransportKey = attribute.Key("signalr.transport")
-)
-
-var (
- // The connection was closed normally
- SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
- // The connection was closed due to a timeout
- SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
- // The connection was closed because the app is shutting down
- SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
-)
-
-var (
- // ServerSentEvents protocol
- SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
- // LongPolling protocol
- SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
- // WebSockets protocol
- SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
-)
-
-// These attributes may be used to describe the sender of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // SourceAddressKey is the attribute Key conforming to the "source.address"
- // semantic conventions. It represents the source address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the destination side, and when communicating
- // through an intermediary, `source.address` SHOULD represent the source
- // address behind any intermediaries, for example proxies, if it's
- // available.
- SourceAddressKey = attribute.Key("source.address")
-
- // SourcePortKey is the attribute Key conforming to the "source.port"
- // semantic conventions. It represents the source port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- SourcePortKey = attribute.Key("source.port")
-)
-
-// SourceAddress returns an attribute KeyValue conforming to the
-// "source.address" semantic conventions. It represents the source address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func SourceAddress(val string) attribute.KeyValue {
- return SourceAddressKey.String(val)
-}
-
-// SourcePort returns an attribute KeyValue conforming to the "source.port"
-// semantic conventions. It represents the source port number
-func SourcePort(val int) attribute.KeyValue {
- return SourcePortKey.Int(val)
-}
-
-// Describes System attributes
-const (
- // SystemDeviceKey is the attribute Key conforming to the "system.device"
- // semantic conventions. It represents the device identifier
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '(identifier)'
- SystemDeviceKey = attribute.Key("system.device")
-)
-
-// SystemDevice returns an attribute KeyValue conforming to the
-// "system.device" semantic conventions. It represents the device identifier
-func SystemDevice(val string) attribute.KeyValue {
- return SystemDeviceKey.String(val)
-}
-
-// Describes System CPU attributes
-const (
- // SystemCPULogicalNumberKey is the attribute Key conforming to the
- // "system.cpu.logical_number" semantic conventions. It represents the
- // logical CPU number [0..n-1]
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1
- SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
-
- // SystemCPUStateKey is the attribute Key conforming to the
- // "system.cpu.state" semantic conventions. It represents the state of the
- // CPU
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'idle', 'interrupt'
- SystemCPUStateKey = attribute.Key("system.cpu.state")
-)
-
-var (
- // user
- SystemCPUStateUser = SystemCPUStateKey.String("user")
- // system
- SystemCPUStateSystem = SystemCPUStateKey.String("system")
- // nice
- SystemCPUStateNice = SystemCPUStateKey.String("nice")
- // idle
- SystemCPUStateIdle = SystemCPUStateKey.String("idle")
- // iowait
- SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
- // interrupt
- SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
- // steal
- SystemCPUStateSteal = SystemCPUStateKey.String("steal")
-)
-
-// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
-// "system.cpu.logical_number" semantic conventions. It represents the logical
-// CPU number [0..n-1]
-func SystemCPULogicalNumber(val int) attribute.KeyValue {
- return SystemCPULogicalNumberKey.Int(val)
-}
-
-// Describes System Memory attributes
-const (
- // SystemMemoryStateKey is the attribute Key conforming to the
- // "system.memory.state" semantic conventions. It represents the memory
- // state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'free', 'cached'
- SystemMemoryStateKey = attribute.Key("system.memory.state")
-)
-
-var (
- // used
- SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
- // free
- SystemMemoryStateFree = SystemMemoryStateKey.String("free")
- // shared
- SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
- // buffers
- SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
- // cached
- SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
-)
-
-// Describes System Memory Paging attributes
-const (
- // SystemPagingDirectionKey is the attribute Key conforming to the
- // "system.paging.direction" semantic conventions. It represents the paging
- // access direction
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'in'
- SystemPagingDirectionKey = attribute.Key("system.paging.direction")
-
- // SystemPagingStateKey is the attribute Key conforming to the
- // "system.paging.state" semantic conventions. It represents the memory
- // paging state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'free'
- SystemPagingStateKey = attribute.Key("system.paging.state")
-
- // SystemPagingTypeKey is the attribute Key conforming to the
- // "system.paging.type" semantic conventions. It represents the memory
- // paging type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'minor'
- SystemPagingTypeKey = attribute.Key("system.paging.type")
-)
-
-var (
- // in
- SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
- // out
- SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
-)
-
-var (
- // used
- SystemPagingStateUsed = SystemPagingStateKey.String("used")
- // free
- SystemPagingStateFree = SystemPagingStateKey.String("free")
-)
-
-var (
- // major
- SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
- // minor
- SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
-)
-
-// Describes Filesystem attributes
-const (
- // SystemFilesystemModeKey is the attribute Key conforming to the
- // "system.filesystem.mode" semantic conventions. It represents the
- // filesystem mode
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'rw, ro'
- SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
-
- // SystemFilesystemMountpointKey is the attribute Key conforming to the
- // "system.filesystem.mountpoint" semantic conventions. It represents the
- // filesystem mount path
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/mnt/data'
- SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
-
- // SystemFilesystemStateKey is the attribute Key conforming to the
- // "system.filesystem.state" semantic conventions. It represents the
- // filesystem state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'used'
- SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
-
- // SystemFilesystemTypeKey is the attribute Key conforming to the
- // "system.filesystem.type" semantic conventions. It represents the
- // filesystem type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ext4'
- SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
-)
-
-var (
- // used
- SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
- // free
- SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
- // reserved
- SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
-)
-
-var (
- // fat32
- SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
- // exfat
- SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
- // ntfs
- SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
- // refs
- SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
- // hfsplus
- SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
- // ext4
- SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
-)
-
-// SystemFilesystemMode returns an attribute KeyValue conforming to the
-// "system.filesystem.mode" semantic conventions. It represents the filesystem
-// mode
-func SystemFilesystemMode(val string) attribute.KeyValue {
- return SystemFilesystemModeKey.String(val)
-}
-
-// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
-// the "system.filesystem.mountpoint" semantic conventions. It represents the
-// filesystem mount path
-func SystemFilesystemMountpoint(val string) attribute.KeyValue {
- return SystemFilesystemMountpointKey.String(val)
-}
-
-// Describes Network attributes
-const (
- // SystemNetworkStateKey is the attribute Key conforming to the
- // "system.network.state" semantic conventions. It represents a stateless
- // protocol MUST NOT set this attribute
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'close_wait'
- SystemNetworkStateKey = attribute.Key("system.network.state")
-)
-
-var (
- // close
- SystemNetworkStateClose = SystemNetworkStateKey.String("close")
- // close_wait
- SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
- // closing
- SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
- // delete
- SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
- // established
- SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
- // fin_wait_1
- SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
- // fin_wait_2
- SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
- // last_ack
- SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
- // listen
- SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
- // syn_recv
- SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
- // syn_sent
- SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
- // time_wait
- SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
-)
-
-// Describes System Process attributes
-const (
- // SystemProcessStatusKey is the attribute Key conforming to the
- // "system.process.status" semantic conventions. It represents the process
- // state, e.g., [Linux Process State
- // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'running'
- SystemProcessStatusKey = attribute.Key("system.process.status")
-)
-
-var (
- // running
- SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
- // sleeping
- SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
- // stopped
- SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
- // defunct
- SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
-)
-
-// Attributes for telemetry SDK.
-const (
- // TelemetrySDKLanguageKey is the attribute Key conforming to the
- // "telemetry.sdk.language" semantic conventions. It represents the
- // language of the telemetry SDK.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
- // TelemetrySDKNameKey is the attribute Key conforming to the
- // "telemetry.sdk.name" semantic conventions. It represents the name of the
- // telemetry SDK as defined above.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'opentelemetry'
- // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
- // to `opentelemetry`.
- // If another SDK, like a fork or a vendor-provided implementation, is
- // used, this SDK MUST set the
- // `telemetry.sdk.name` attribute to the fully-qualified class or module
- // name of this SDK's main entry point
- // or another suitable identifier depending on the language.
- // The identifier `opentelemetry` is reserved and MUST NOT be used in this
- // case.
- // All custom identifiers SHOULD be stable across different versions of an
- // implementation.
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
- // TelemetrySDKVersionKey is the attribute Key conforming to the
- // "telemetry.sdk.version" semantic conventions. It represents the version
- // string of the telemetry SDK.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-
- // TelemetryDistroNameKey is the attribute Key conforming to the
- // "telemetry.distro.name" semantic conventions. It represents the name of
- // the auto instrumentation agent or distribution, if used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'parts-unlimited-java'
- // Note: Official auto instrumentation agents and distributions SHOULD set
- // the `telemetry.distro.name` attribute to
- // a string starting with `opentelemetry-`, e.g.
- // `opentelemetry-java-instrumentation`.
- TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
-
- // TelemetryDistroVersionKey is the attribute Key conforming to the
- // "telemetry.distro.version" semantic conventions. It represents the
- // version string of the auto instrumentation agent or distribution, if
- // used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2.3'
- TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // rust
- TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
- return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
- return TelemetrySDKVersionKey.String(val)
-}
-
-// TelemetryDistroName returns an attribute KeyValue conforming to the
-// "telemetry.distro.name" semantic conventions. It represents the name of the
-// auto instrumentation agent or distribution, if used.
-func TelemetryDistroName(val string) attribute.KeyValue {
- return TelemetryDistroNameKey.String(val)
-}
-
-// TelemetryDistroVersion returns an attribute KeyValue conforming to the
-// "telemetry.distro.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent or distribution, if used.
-func TelemetryDistroVersion(val string) attribute.KeyValue {
- return TelemetryDistroVersionKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// Semantic convention attributes in the TLS namespace.
-const (
- // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
- // semantic conventions. It represents the string indicating the
- // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
- // used during the current connection.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
- // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
- // Note: The values allowed for `tls.cipher` MUST be one of the
- // `Descriptions` of the [registered TLS Cipher
- // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
- TLSCipherKey = attribute.Key("tls.cipher")
-
- // TLSClientCertificateKey is the attribute Key conforming to the
- // "tls.client.certificate" semantic conventions. It represents the
- // pEM-encoded stand-alone certificate offered by the client. This is
- // usually mutually-exclusive of `client.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSClientCertificateKey = attribute.Key("tls.client.certificate")
-
- // TLSClientCertificateChainKey is the attribute Key conforming to the
- // "tls.client.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the client. This is usually mutually-exclusive of
- // `client.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
-
- // TLSClientHashMd5Key is the attribute Key conforming to the
- // "tls.client.hash.md5" semantic conventions. It represents the
- // certificate fingerprint using the MD5 digest of DER-encoded version of
- // certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
-
- // TLSClientHashSha1Key is the attribute Key conforming to the
- // "tls.client.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of DER-encoded version of
- // certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
-
- // TLSClientHashSha256Key is the attribute Key conforming to the
- // "tls.client.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of DER-encoded version
- // of certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
-
- // TLSClientIssuerKey is the attribute Key conforming to the
- // "tls.client.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSClientIssuerKey = attribute.Key("tls.client.issuer")
-
- // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
- // semantic conventions. It represents a hash that identifies clients based
- // on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSClientJa3Key = attribute.Key("tls.client.ja3")
-
- // TLSClientNotAfterKey is the attribute Key conforming to the
- // "tls.client.not_after" semantic conventions. It represents the date/Time
- // indicating when client certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
-
- // TLSClientNotBeforeKey is the attribute Key conforming to the
- // "tls.client.not_before" semantic conventions. It represents the
- // date/Time indicating when client certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
-
- // TLSClientServerNameKey is the attribute Key conforming to the
- // "tls.client.server_name" semantic conventions. It represents the also
- // called an SNI, this tells the server which hostname to which the client
- // is attempting to connect to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry.io'
- TLSClientServerNameKey = attribute.Key("tls.client.server_name")
-
- // TLSClientSubjectKey is the attribute Key conforming to the
- // "tls.client.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
- TLSClientSubjectKey = attribute.Key("tls.client.subject")
-
- // TLSClientSupportedCiphersKey is the attribute Key conforming to the
- // "tls.client.supported_ciphers" semantic conventions. It represents the
- // array of ciphers offered by the client during the client hello.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
- TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
-
- // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
- // conventions. It represents the string indicating the curve used for the
- // given cipher, when applicable
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'secp256r1'
- TLSCurveKey = attribute.Key("tls.curve")
-
- // TLSEstablishedKey is the attribute Key conforming to the
- // "tls.established" semantic conventions. It represents the boolean flag
- // indicating if the TLS negotiation was successful and transitioned to an
- // encrypted tunnel.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSEstablishedKey = attribute.Key("tls.established")
-
- // TLSNextProtocolKey is the attribute Key conforming to the
- // "tls.next_protocol" semantic conventions. It represents the string
- // indicating the protocol being tunneled. Per the values in the [IANA
- // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
- // this string should be lower case.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'http/1.1'
- TLSNextProtocolKey = attribute.Key("tls.next_protocol")
-
- // TLSProtocolNameKey is the attribute Key conforming to the
- // "tls.protocol.name" semantic conventions. It represents the normalized
- // lowercase protocol name parsed from original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- TLSProtocolNameKey = attribute.Key("tls.protocol.name")
-
- // TLSProtocolVersionKey is the attribute Key conforming to the
- // "tls.protocol.version" semantic conventions. It represents the numeric
- // part of the version parsed from the original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2', '3'
- TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
-
- // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
- // semantic conventions. It represents the boolean flag indicating if this
- // TLS connection was resumed from an existing TLS negotiation.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSResumedKey = attribute.Key("tls.resumed")
-
- // TLSServerCertificateKey is the attribute Key conforming to the
- // "tls.server.certificate" semantic conventions. It represents the
- // pEM-encoded stand-alone certificate offered by the server. This is
- // usually mutually-exclusive of `server.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSServerCertificateKey = attribute.Key("tls.server.certificate")
-
- // TLSServerCertificateChainKey is the attribute Key conforming to the
- // "tls.server.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the server. This is usually mutually-exclusive of
- // `server.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
-
- // TLSServerHashMd5Key is the attribute Key conforming to the
- // "tls.server.hash.md5" semantic conventions. It represents the
- // certificate fingerprint using the MD5 digest of DER-encoded version of
- // certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
-
- // TLSServerHashSha1Key is the attribute Key conforming to the
- // "tls.server.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of DER-encoded version of
- // certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
-
- // TLSServerHashSha256Key is the attribute Key conforming to the
- // "tls.server.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of DER-encoded version
- // of certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
-
- // TLSServerIssuerKey is the attribute Key conforming to the
- // "tls.server.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSServerIssuerKey = attribute.Key("tls.server.issuer")
-
- // TLSServerJa3sKey is the attribute Key conforming to the
- // "tls.server.ja3s" semantic conventions. It represents a hash that
- // identifies servers based on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
-
- // TLSServerNotAfterKey is the attribute Key conforming to the
- // "tls.server.not_after" semantic conventions. It represents the date/Time
- // indicating when server certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
-
- // TLSServerNotBeforeKey is the attribute Key conforming to the
- // "tls.server.not_before" semantic conventions. It represents the
- // date/Time indicating when server certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
-
- // TLSServerSubjectKey is the attribute Key conforming to the
- // "tls.server.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // server.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
- TLSServerSubjectKey = attribute.Key("tls.server.subject")
-)
-
-var (
- // ssl
- TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
- // tls
- TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
-)
-
-// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
-// semantic conventions. It represents the string indicating the
-// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
-// during the current connection.
-func TLSCipher(val string) attribute.KeyValue {
- return TLSCipherKey.String(val)
-}
-
-// TLSClientCertificate returns an attribute KeyValue conforming to the
-// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the client. This is usually
-// mutually-exclusive of `client.certificate_chain` since this value also
-// exists in that list.
-func TLSClientCertificate(val string) attribute.KeyValue {
- return TLSClientCertificateKey.String(val)
-}
-
-// TLSClientCertificateChain returns an attribute KeyValue conforming to the
-// "tls.client.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the client. This is usually mutually-exclusive of `client.certificate` since
-// that value should be the first certificate in the chain.
-func TLSClientCertificateChain(val ...string) attribute.KeyValue {
- return TLSClientCertificateChainKey.StringSlice(val)
-}
-
-// TLSClientHashMd5 returns an attribute KeyValue conforming to the
-// "tls.client.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashMd5(val string) attribute.KeyValue {
- return TLSClientHashMd5Key.String(val)
-}
-
-// TLSClientHashSha1 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha1(val string) attribute.KeyValue {
- return TLSClientHashSha1Key.String(val)
-}
-
-// TLSClientHashSha256 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha256(val string) attribute.KeyValue {
- return TLSClientHashSha256Key.String(val)
-}
-
-// TLSClientIssuer returns an attribute KeyValue conforming to the
-// "tls.client.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the client.
-func TLSClientIssuer(val string) attribute.KeyValue {
- return TLSClientIssuerKey.String(val)
-}
-
-// TLSClientJa3 returns an attribute KeyValue conforming to the
-// "tls.client.ja3" semantic conventions. It represents a hash that identifies
-// clients based on how they perform an SSL/TLS handshake.
-func TLSClientJa3(val string) attribute.KeyValue {
- return TLSClientJa3Key.String(val)
-}
-
-// TLSClientNotAfter returns an attribute KeyValue conforming to the
-// "tls.client.not_after" semantic conventions. It represents the date/Time
-// indicating when client certificate is no longer considered valid.
-func TLSClientNotAfter(val string) attribute.KeyValue {
- return TLSClientNotAfterKey.String(val)
-}
-
-// TLSClientNotBefore returns an attribute KeyValue conforming to the
-// "tls.client.not_before" semantic conventions. It represents the date/Time
-// indicating when client certificate is first considered valid.
-func TLSClientNotBefore(val string) attribute.KeyValue {
- return TLSClientNotBeforeKey.String(val)
-}
-
-// TLSClientServerName returns an attribute KeyValue conforming to the
-// "tls.client.server_name" semantic conventions. It represents the also called
-// an SNI, this tells the server which hostname to which the client is
-// attempting to connect to.
-func TLSClientServerName(val string) attribute.KeyValue {
- return TLSClientServerNameKey.String(val)
-}
-
-// TLSClientSubject returns an attribute KeyValue conforming to the
-// "tls.client.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the client.
-func TLSClientSubject(val string) attribute.KeyValue {
- return TLSClientSubjectKey.String(val)
-}
-
-// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
-// "tls.client.supported_ciphers" semantic conventions. It represents the array
-// of ciphers offered by the client during the client hello.
-func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
- return TLSClientSupportedCiphersKey.StringSlice(val)
-}
-
-// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
-// semantic conventions. It represents the string indicating the curve used for
-// the given cipher, when applicable
-func TLSCurve(val string) attribute.KeyValue {
- return TLSCurveKey.String(val)
-}
-
-// TLSEstablished returns an attribute KeyValue conforming to the
-// "tls.established" semantic conventions. It represents the boolean flag
-// indicating if the TLS negotiation was successful and transitioned to an
-// encrypted tunnel.
-func TLSEstablished(val bool) attribute.KeyValue {
- return TLSEstablishedKey.Bool(val)
-}
-
-// TLSNextProtocol returns an attribute KeyValue conforming to the
-// "tls.next_protocol" semantic conventions. It represents the string
-// indicating the protocol being tunneled. Per the values in the [IANA
-// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
-// this string should be lower case.
-func TLSNextProtocol(val string) attribute.KeyValue {
- return TLSNextProtocolKey.String(val)
-}
-
-// TLSProtocolVersion returns an attribute KeyValue conforming to the
-// "tls.protocol.version" semantic conventions. It represents the numeric part
-// of the version parsed from the original string of the negotiated [SSL/TLS
-// protocol
-// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
-func TLSProtocolVersion(val string) attribute.KeyValue {
- return TLSProtocolVersionKey.String(val)
-}
-
-// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
-// semantic conventions. It represents the boolean flag indicating if this TLS
-// connection was resumed from an existing TLS negotiation.
-func TLSResumed(val bool) attribute.KeyValue {
- return TLSResumedKey.Bool(val)
-}
-
-// TLSServerCertificate returns an attribute KeyValue conforming to the
-// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the server. This is usually
-// mutually-exclusive of `server.certificate_chain` since this value also
-// exists in that list.
-func TLSServerCertificate(val string) attribute.KeyValue {
- return TLSServerCertificateKey.String(val)
-}
-
-// TLSServerCertificateChain returns an attribute KeyValue conforming to the
-// "tls.server.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the server. This is usually mutually-exclusive of `server.certificate` since
-// that value should be the first certificate in the chain.
-func TLSServerCertificateChain(val ...string) attribute.KeyValue {
- return TLSServerCertificateChainKey.StringSlice(val)
-}
-
-// TLSServerHashMd5 returns an attribute KeyValue conforming to the
-// "tls.server.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashMd5(val string) attribute.KeyValue {
- return TLSServerHashMd5Key.String(val)
-}
-
-// TLSServerHashSha1 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashSha1(val string) attribute.KeyValue {
- return TLSServerHashSha1Key.String(val)
-}
-
-// TLSServerHashSha256 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashSha256(val string) attribute.KeyValue {
- return TLSServerHashSha256Key.String(val)
-}
-
-// TLSServerIssuer returns an attribute KeyValue conforming to the
-// "tls.server.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the client.
-func TLSServerIssuer(val string) attribute.KeyValue {
- return TLSServerIssuerKey.String(val)
-}
-
-// TLSServerJa3s returns an attribute KeyValue conforming to the
-// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
-// servers based on how they perform an SSL/TLS handshake.
-func TLSServerJa3s(val string) attribute.KeyValue {
- return TLSServerJa3sKey.String(val)
-}
-
-// TLSServerNotAfter returns an attribute KeyValue conforming to the
-// "tls.server.not_after" semantic conventions. It represents the date/Time
-// indicating when server certificate is no longer considered valid.
-func TLSServerNotAfter(val string) attribute.KeyValue {
- return TLSServerNotAfterKey.String(val)
-}
-
-// TLSServerNotBefore returns an attribute KeyValue conforming to the
-// "tls.server.not_before" semantic conventions. It represents the date/Time
-// indicating when server certificate is first considered valid.
-func TLSServerNotBefore(val string) attribute.KeyValue {
- return TLSServerNotBeforeKey.String(val)
-}
-
-// TLSServerSubject returns an attribute KeyValue conforming to the
-// "tls.server.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the server.
-func TLSServerSubject(val string) attribute.KeyValue {
- return TLSServerSubjectKey.String(val)
-}
-
-// Attributes describing URL.
-const (
- // URLDomainKey is the attribute Key conforming to the "url.domain"
- // semantic conventions. It represents the domain extracted from the
- // `url.full`, such as "opentelemetry.io".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
- // '[1080:0:0:0:8:800:200C:417A]'
- // Note: In some cases a URL may refer to an IP and/or port directly,
- // without a domain name. In this case, the IP address would go to the
- // domain field. If the URL contains a [literal IPv6
- // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by
- // `[` and `]`, the `[` and `]` characters should also be captured in the
- // domain field.
- URLDomainKey = attribute.Key("url.domain")
-
- // URLExtensionKey is the attribute Key conforming to the "url.extension"
- // semantic conventions. It represents the file extension extracted from
- // the `url.full`, excluding the leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: The file extension is only set if it exists, as not every url has
- // a file extension. When the file name has multiple extensions
- // `example.tar.gz`, only the last one should be captured `gz`, not
- // `tar.gz`.
- URLExtensionKey = attribute.Key("url.extension")
-
- // URLFragmentKey is the attribute Key conforming to the "url.fragment"
- // semantic conventions. It represents the [URI
- // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'SemConv'
- URLFragmentKey = attribute.Key("url.fragment")
-
- // URLFullKey is the attribute Key conforming to the "url.full" semantic
- // conventions. It represents the absolute URL describing a network
- // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // '//localhost'
- // Note: For network calls, URL usually has
- // `scheme://host[:port][path][?query][#fragment]` format, where the
- // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
- // included nevertheless.
- // `url.full` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case username and
- // password SHOULD be redacted and attribute's value SHOULD be
- // `https://REDACTED:REDACTED@www.example.com/`.
- // `url.full` SHOULD capture the absolute URL when it is available (or can
- // be reconstructed). Sensitive content provided in `url.full` SHOULD be
- // scrubbed when instrumentations can identify it.
- URLFullKey = attribute.Key("url.full")
-
- // URLOriginalKey is the attribute Key conforming to the "url.original"
- // semantic conventions. It represents the unmodified original URL as seen
- // in the event source.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // 'search?q=OpenTelemetry'
- // Note: In network monitoring, the observed URL may be a full URL, whereas
- // in access logs, the URL is often just represented as a path. This field
- // is meant to represent the URL as it was observed, complete or not.
- // `url.original` might contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case password and
- // username SHOULD NOT be redacted and attribute's value SHOULD remain the
- // same.
- URLOriginalKey = attribute.Key("url.original")
-
- // URLPathKey is the attribute Key conforming to the "url.path" semantic
- // conventions. It represents the [URI
- // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/search'
- // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
- // instrumentations can identify it.
- URLPathKey = attribute.Key("url.path")
-
- // URLPortKey is the attribute Key conforming to the "url.port" semantic
- // conventions. It represents the port extracted from the `url.full`
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 443
- URLPortKey = attribute.Key("url.port")
-
- // URLQueryKey is the attribute Key conforming to the "url.query" semantic
- // conventions. It represents the [URI
- // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'q=OpenTelemetry'
- // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
- // instrumentations can identify it.
- URLQueryKey = attribute.Key("url.query")
-
- // URLRegisteredDomainKey is the attribute Key conforming to the
- // "url.registered_domain" semantic conventions. It represents the highest
- // registered url domain, stripped of the subdomain.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.com', 'foo.co.uk'
- // Note: This value can be determined precisely with the [public suffix
- // list](http://publicsuffix.org). For example, the registered domain for
- // `foo.example.com` is `example.com`. Trying to approximate this by simply
- // taking the last two labels will not work well for TLDs such as `co.uk`.
- URLRegisteredDomainKey = attribute.Key("url.registered_domain")
-
- // URLSchemeKey is the attribute Key conforming to the "url.scheme"
- // semantic conventions. It represents the [URI
- // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
- // identifying the used protocol.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https', 'ftp', 'telnet'
- URLSchemeKey = attribute.Key("url.scheme")
-
- // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
- // semantic conventions. It represents the subdomain portion of a fully
- // qualified domain name includes all of the names except the host name
- // under the registered_domain. In a partially qualified domain, or if the
- // qualification level of the full name cannot be determined, subdomain
- // contains all of the names below the registered domain.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'east', 'sub2.sub1'
- // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
- // the domain has multiple levels of subdomain, such as
- // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
- // with no trailing period.
- URLSubdomainKey = attribute.Key("url.subdomain")
-
- // URLTemplateKey is the attribute Key conforming to the "url.template"
- // semantic conventions. It represents the low-cardinality template of an
- // [absolute path
- // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
- URLTemplateKey = attribute.Key("url.template")
-
- // URLTopLevelDomainKey is the attribute Key conforming to the
- // "url.top_level_domain" semantic conventions. It represents the effective
- // top level domain (eTLD), also known as the domain suffix, is the last
- // part of the domain name. For example, the top level domain for
- // example.com is `com`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com', 'co.uk'
- // Note: This value can be determined precisely with the [public suffix
- // list](http://publicsuffix.org).
- URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
-)
-
-// URLDomain returns an attribute KeyValue conforming to the "url.domain"
-// semantic conventions. It represents the domain extracted from the
-// `url.full`, such as "opentelemetry.io".
-func URLDomain(val string) attribute.KeyValue {
- return URLDomainKey.String(val)
-}
-
-// URLExtension returns an attribute KeyValue conforming to the
-// "url.extension" semantic conventions. It represents the file extension
-// extracted from the `url.full`, excluding the leading dot.
-func URLExtension(val string) attribute.KeyValue {
- return URLExtensionKey.String(val)
-}
-
-// URLFragment returns an attribute KeyValue conforming to the
-// "url.fragment" semantic conventions. It represents the [URI
-// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
-func URLFragment(val string) attribute.KeyValue {
- return URLFragmentKey.String(val)
-}
-
-// URLFull returns an attribute KeyValue conforming to the "url.full"
-// semantic conventions. It represents the absolute URL describing a network
-// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
-func URLFull(val string) attribute.KeyValue {
- return URLFullKey.String(val)
-}
-
-// URLOriginal returns an attribute KeyValue conforming to the
-// "url.original" semantic conventions. It represents the unmodified original
-// URL as seen in the event source.
-func URLOriginal(val string) attribute.KeyValue {
- return URLOriginalKey.String(val)
-}
-
-// URLPath returns an attribute KeyValue conforming to the "url.path"
-// semantic conventions. It represents the [URI
-// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
-func URLPath(val string) attribute.KeyValue {
- return URLPathKey.String(val)
-}
-
-// URLPort returns an attribute KeyValue conforming to the "url.port"
-// semantic conventions. It represents the port extracted from the `url.full`
-func URLPort(val int) attribute.KeyValue {
- return URLPortKey.Int(val)
-}
-
-// URLQuery returns an attribute KeyValue conforming to the "url.query"
-// semantic conventions. It represents the [URI
-// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
-func URLQuery(val string) attribute.KeyValue {
- return URLQueryKey.String(val)
-}
-
-// URLRegisteredDomain returns an attribute KeyValue conforming to the
-// "url.registered_domain" semantic conventions. It represents the highest
-// registered url domain, stripped of the subdomain.
-func URLRegisteredDomain(val string) attribute.KeyValue {
- return URLRegisteredDomainKey.String(val)
-}
-
-// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
-// semantic conventions. It represents the [URI
-// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
-// identifying the used protocol.
-func URLScheme(val string) attribute.KeyValue {
- return URLSchemeKey.String(val)
-}
-
-// URLSubdomain returns an attribute KeyValue conforming to the
-// "url.subdomain" semantic conventions. It represents the subdomain portion of
-// a fully qualified domain name includes all of the names except the host name
-// under the registered_domain. In a partially qualified domain, or if the
-// qualification level of the full name cannot be determined, subdomain
-// contains all of the names below the registered domain.
-func URLSubdomain(val string) attribute.KeyValue {
- return URLSubdomainKey.String(val)
-}
-
-// URLTemplate returns an attribute KeyValue conforming to the
-// "url.template" semantic conventions. It represents the low-cardinality
-// template of an [absolute path
-// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
-func URLTemplate(val string) attribute.KeyValue {
- return URLTemplateKey.String(val)
-}
-
-// URLTopLevelDomain returns an attribute KeyValue conforming to the
-// "url.top_level_domain" semantic conventions. It represents the effective top
-// level domain (eTLD), also known as the domain suffix, is the last part of
-// the domain name. For example, the top level domain for example.com is `com`.
-func URLTopLevelDomain(val string) attribute.KeyValue {
- return URLTopLevelDomainKey.String(val)
-}
-
-// Describes user-agent attributes.
-const (
- // UserAgentNameKey is the attribute Key conforming to the
- // "user_agent.name" semantic conventions. It represents the name of the
- // user-agent extracted from original. Usually refers to the browser's
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Safari', 'YourApp'
- // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
- // from original string. In the case of using a user-agent for non-browser
- // products, such as microservices with multiple names/versions inside the
- // `user_agent.original`, the most significant name SHOULD be selected. In
- // such a scenario it should align with `user_agent.version`
- UserAgentNameKey = attribute.Key("user_agent.name")
-
- // UserAgentOriginalKey is the attribute Key conforming to the
- // "user_agent.original" semantic conventions. It represents the value of
- // the [HTTP
- // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
- // header sent by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
- // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
- // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
- // grpc-java-okhttp/1.27.2'
- UserAgentOriginalKey = attribute.Key("user_agent.original")
-
- // UserAgentVersionKey is the attribute Key conforming to the
- // "user_agent.version" semantic conventions. It represents the version of
- // the user-agent extracted from original. Usually refers to the browser's
- // version
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.1.2', '1.0.0'
- // Note: [Example](https://www.whatsmyua.info) of extracting browser's
- // version from original string. In the case of using a user-agent for
- // non-browser products, such as microservices with multiple names/versions
- // inside the `user_agent.original`, the most significant version SHOULD be
- // selected. In such a scenario it should align with `user_agent.name`
- UserAgentVersionKey = attribute.Key("user_agent.version")
-)
-
-// UserAgentName returns an attribute KeyValue conforming to the
-// "user_agent.name" semantic conventions. It represents the name of the
-// user-agent extracted from original. Usually refers to the browser's name.
-func UserAgentName(val string) attribute.KeyValue {
- return UserAgentNameKey.String(val)
-}
-
-// UserAgentOriginal returns an attribute KeyValue conforming to the
-// "user_agent.original" semantic conventions. It represents the value of the
-// [HTTP
-// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-// header sent by the client.
-func UserAgentOriginal(val string) attribute.KeyValue {
- return UserAgentOriginalKey.String(val)
-}
-
-// UserAgentVersion returns an attribute KeyValue conforming to the
-// "user_agent.version" semantic conventions. It represents the version of the
-// user-agent extracted from original. Usually refers to the browser's version
-func UserAgentVersion(val string) attribute.KeyValue {
- return UserAgentVersionKey.String(val)
-}
-
-// The attributes used to describe the packaged software running the
-// application code.
-const (
- // WebEngineDescriptionKey is the attribute Key conforming to the
- // "webengine.description" semantic conventions. It represents the
- // additional description of the web engine (e.g. detailed version and
- // edition information).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-
- // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
- // semantic conventions. It represents the name of the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
-
- // WebEngineVersionKey is the attribute Key conforming to the
- // "webengine.version" semantic conventions. It represents the version of
- // the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
-)
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
deleted file mode 100644
index d031bbea7..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconv implements OpenTelemetry semantic conventions.
-//
-// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the v1.26.0
-// version of the OpenTelemetry semantic conventions.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
deleted file mode 100644
index bfaee0d56..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-const (
- // ExceptionEventName is the name of the Span event representing an exception.
- ExceptionEventName = "exception"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
deleted file mode 100644
index fcdb9f485..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
+++ /dev/null
@@ -1,1307 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-const (
-
- // ContainerCPUTime is the metric conforming to the "container.cpu.time"
- // semantic conventions. It represents the total CPU time consumed.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- ContainerCPUTimeName = "container.cpu.time"
- ContainerCPUTimeUnit = "s"
- ContainerCPUTimeDescription = "Total CPU time consumed"
-
- // ContainerMemoryUsage is the metric conforming to the
- // "container.memory.usage" semantic conventions. It represents the memory
- // usage of the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerMemoryUsageName = "container.memory.usage"
- ContainerMemoryUsageUnit = "By"
- ContainerMemoryUsageDescription = "Memory usage of the container."
-
- // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic
- // conventions. It represents the disk bytes for the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerDiskIoName = "container.disk.io"
- ContainerDiskIoUnit = "By"
- ContainerDiskIoDescription = "Disk bytes for the container."
-
- // ContainerNetworkIo is the metric conforming to the "container.network.io"
- // semantic conventions. It represents the network bytes for the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerNetworkIoName = "container.network.io"
- ContainerNetworkIoUnit = "By"
- ContainerNetworkIoDescription = "Network bytes for the container."
-
- // DBClientOperationDuration is the metric conforming to the
- // "db.client.operation.duration" semantic conventions. It represents the
- // duration of database client operations.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientOperationDurationName = "db.client.operation.duration"
- DBClientOperationDurationUnit = "s"
- DBClientOperationDurationDescription = "Duration of database client operations."
-
- // DBClientConnectionCount is the metric conforming to the
- // "db.client.connection.count" semantic conventions. It represents the number
- // of connections that are currently in state described by the `state`
- // attribute.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionCountName = "db.client.connection.count"
- DBClientConnectionCountUnit = "{connection}"
- DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute"
-
- // DBClientConnectionIdleMax is the metric conforming to the
- // "db.client.connection.idle.max" semantic conventions. It represents the
- // maximum number of idle open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionIdleMaxName = "db.client.connection.idle.max"
- DBClientConnectionIdleMaxUnit = "{connection}"
- DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed"
-
- // DBClientConnectionIdleMin is the metric conforming to the
- // "db.client.connection.idle.min" semantic conventions. It represents the
- // minimum number of idle open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionIdleMinName = "db.client.connection.idle.min"
- DBClientConnectionIdleMinUnit = "{connection}"
- DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed"
-
- // DBClientConnectionMax is the metric conforming to the
- // "db.client.connection.max" semantic conventions. It represents the maximum
- // number of open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionMaxName = "db.client.connection.max"
- DBClientConnectionMaxUnit = "{connection}"
- DBClientConnectionMaxDescription = "The maximum number of open connections allowed"
-
- // DBClientConnectionPendingRequests is the metric conforming to the
- // "db.client.connection.pending_requests" semantic conventions. It represents
- // the number of pending requests for an open connection, cumulative for the
- // entire pool.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests"
- DBClientConnectionPendingRequestsUnit = "{request}"
- DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
-
- // DBClientConnectionTimeouts is the metric conforming to the
- // "db.client.connection.timeouts" semantic conventions. It represents the
- // number of connection timeouts that have occurred trying to obtain a
- // connection from the pool.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
- DBClientConnectionTimeoutsUnit = "{timeout}"
- DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
-
- // DBClientConnectionCreateTime is the metric conforming to the
- // "db.client.connection.create_time" semantic conventions. It represents the
- // time it took to create a new connection.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionCreateTimeName = "db.client.connection.create_time"
- DBClientConnectionCreateTimeUnit = "s"
- DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
-
- // DBClientConnectionWaitTime is the metric conforming to the
- // "db.client.connection.wait_time" semantic conventions. It represents the
- // time it took to obtain an open connection from the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
- DBClientConnectionWaitTimeUnit = "s"
- DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
-
- // DBClientConnectionUseTime is the metric conforming to the
- // "db.client.connection.use_time" semantic conventions. It represents the time
- // between borrowing a connection and returning it to the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionUseTimeName = "db.client.connection.use_time"
- DBClientConnectionUseTimeUnit = "s"
- DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
-
- // DBClientConnectionsUsage is the metric conforming to the
- // "db.client.connections.usage" semantic conventions. It represents the
- // deprecated, use `db.client.connection.count` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsUsageName = "db.client.connections.usage"
- DBClientConnectionsUsageUnit = "{connection}"
- DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
-
- // DBClientConnectionsIdleMax is the metric conforming to the
- // "db.client.connections.idle.max" semantic conventions. It represents the
- // deprecated, use `db.client.connection.idle.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
- DBClientConnectionsIdleMaxUnit = "{connection}"
- DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
-
- // DBClientConnectionsIdleMin is the metric conforming to the
- // "db.client.connections.idle.min" semantic conventions. It represents the
- // deprecated, use `db.client.connection.idle.min` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
- DBClientConnectionsIdleMinUnit = "{connection}"
- DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
-
- // DBClientConnectionsMax is the metric conforming to the
- // "db.client.connections.max" semantic conventions. It represents the
- // deprecated, use `db.client.connection.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsMaxName = "db.client.connections.max"
- DBClientConnectionsMaxUnit = "{connection}"
- DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
-
- // DBClientConnectionsPendingRequests is the metric conforming to the
- // "db.client.connections.pending_requests" semantic conventions. It represents
- // the deprecated, use `db.client.connection.pending_requests` instead.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
- DBClientConnectionsPendingRequestsUnit = "{request}"
- DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
-
- // DBClientConnectionsTimeouts is the metric conforming to the
- // "db.client.connections.timeouts" semantic conventions. It represents the
- // deprecated, use `db.client.connection.timeouts` instead.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
- DBClientConnectionsTimeoutsUnit = "{timeout}"
- DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
-
- // DBClientConnectionsCreateTime is the metric conforming to the
- // "db.client.connections.create_time" semantic conventions. It represents the
- // deprecated, use `db.client.connection.create_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
- DBClientConnectionsCreateTimeUnit = "ms"
- DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsWaitTime is the metric conforming to the
- // "db.client.connections.wait_time" semantic conventions. It represents the
- // deprecated, use `db.client.connection.wait_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
- DBClientConnectionsWaitTimeUnit = "ms"
- DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsUseTime is the metric conforming to the
- // "db.client.connections.use_time" semantic conventions. It represents the
- // deprecated, use `db.client.connection.use_time` instead. Note: the unit also
- // changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsUseTimeName = "db.client.connections.use_time"
- DBClientConnectionsUseTimeUnit = "ms"
- DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
- // semantic conventions. It represents the measures the time taken to perform a
- // DNS lookup.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DNSLookupDurationName = "dns.lookup.duration"
- DNSLookupDurationUnit = "s"
- DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
-
- // AspnetcoreRoutingMatchAttempts is the metric conforming to the
- // "aspnetcore.routing.match_attempts" semantic conventions. It represents the
- // number of requests that were attempted to be matched to an endpoint.
- // Instrument: counter
- // Unit: {match_attempt}
- // Stability: Stable
- AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
- AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
- AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
-
- // AspnetcoreDiagnosticsExceptions is the metric conforming to the
- // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
- // number of exceptions caught by exception handling middleware.
- // Instrument: counter
- // Unit: {exception}
- // Stability: Stable
- AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
- AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
- AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
-
- // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
- // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
- // represents the number of requests that are currently active on the server
- // that hold a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
- AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
- AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
-
- // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
- // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
- // represents the duration of rate limiting lease held by requests on the
- // server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
- AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
- AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
-
- // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
- // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
- // represents the time the request spent in a queue waiting to acquire a rate
- // limiting lease.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
- AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
- AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
- // represents the number of requests that are currently queued, waiting to
- // acquire a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
- AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
- AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
- // number of requests that tried to acquire a rate limiting lease.
- // Instrument: counter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
- AspnetcoreRateLimitingRequestsUnit = "{request}"
- AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
-
- // KestrelActiveConnections is the metric conforming to the
- // "kestrel.active_connections" semantic conventions. It represents the number
- // of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelActiveConnectionsName = "kestrel.active_connections"
- KestrelActiveConnectionsUnit = "{connection}"
- KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // KestrelConnectionDuration is the metric conforming to the
- // "kestrel.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelConnectionDurationName = "kestrel.connection.duration"
- KestrelConnectionDurationUnit = "s"
- KestrelConnectionDurationDescription = "The duration of connections on the server."
-
- // KestrelRejectedConnections is the metric conforming to the
- // "kestrel.rejected_connections" semantic conventions. It represents the
- // number of connections rejected by the server.
- // Instrument: counter
- // Unit: {connection}
- // Stability: Stable
- KestrelRejectedConnectionsName = "kestrel.rejected_connections"
- KestrelRejectedConnectionsUnit = "{connection}"
- KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
-
- // KestrelQueuedConnections is the metric conforming to the
- // "kestrel.queued_connections" semantic conventions. It represents the number
- // of connections that are currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelQueuedConnectionsName = "kestrel.queued_connections"
- KestrelQueuedConnectionsUnit = "{connection}"
- KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
-
- // KestrelQueuedRequests is the metric conforming to the
- // "kestrel.queued_requests" semantic conventions. It represents the number of
- // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
- // currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- KestrelQueuedRequestsName = "kestrel.queued_requests"
- KestrelQueuedRequestsUnit = "{request}"
- KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
-
- // KestrelUpgradedConnections is the metric conforming to the
- // "kestrel.upgraded_connections" semantic conventions. It represents the
- // number of connections that are currently upgraded (WebSockets). .
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
- KestrelUpgradedConnectionsUnit = "{connection}"
- KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ."
-
- // KestrelTLSHandshakeDuration is the metric conforming to the
- // "kestrel.tls_handshake.duration" semantic conventions. It represents the
- // duration of TLS handshakes on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
- KestrelTLSHandshakeDurationUnit = "s"
- KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
-
- // KestrelActiveTLSHandshakes is the metric conforming to the
- // "kestrel.active_tls_handshakes" semantic conventions. It represents the
- // number of TLS handshakes that are currently in progress on the server.
- // Instrument: updowncounter
- // Unit: {handshake}
- // Stability: Stable
- KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
- KestrelActiveTLSHandshakesUnit = "{handshake}"
- KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
-
- // SignalrServerConnectionDuration is the metric conforming to the
- // "signalr.server.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- SignalrServerConnectionDurationName = "signalr.server.connection.duration"
- SignalrServerConnectionDurationUnit = "s"
- SignalrServerConnectionDurationDescription = "The duration of connections on the server."
-
- // SignalrServerActiveConnections is the metric conforming to the
- // "signalr.server.active_connections" semantic conventions. It represents the
- // number of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- SignalrServerActiveConnectionsName = "signalr.server.active_connections"
- SignalrServerActiveConnectionsUnit = "{connection}"
- SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
- // semantic conventions. It represents the measures the duration of the
- // function's logic execution.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInvokeDurationName = "faas.invoke_duration"
- FaaSInvokeDurationUnit = "s"
- FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
-
- // FaaSInitDuration is the metric conforming to the "faas.init_duration"
- // semantic conventions. It represents the measures the duration of the
- // function's initialization, such as a cold start.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInitDurationName = "faas.init_duration"
- FaaSInitDurationUnit = "s"
- FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
-
- // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
- // conventions. It represents the number of invocation cold starts.
- // Instrument: counter
- // Unit: {coldstart}
- // Stability: Experimental
- FaaSColdstartsName = "faas.coldstarts"
- FaaSColdstartsUnit = "{coldstart}"
- FaaSColdstartsDescription = "Number of invocation cold starts"
-
- // FaaSErrors is the metric conforming to the "faas.errors" semantic
- // conventions. It represents the number of invocation errors.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- FaaSErrorsName = "faas.errors"
- FaaSErrorsUnit = "{error}"
- FaaSErrorsDescription = "Number of invocation errors"
-
- // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
- // conventions. It represents the number of successful invocations.
- // Instrument: counter
- // Unit: {invocation}
- // Stability: Experimental
- FaaSInvocationsName = "faas.invocations"
- FaaSInvocationsUnit = "{invocation}"
- FaaSInvocationsDescription = "Number of successful invocations"
-
- // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
- // conventions. It represents the number of invocation timeouts.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- FaaSTimeoutsName = "faas.timeouts"
- FaaSTimeoutsUnit = "{timeout}"
- FaaSTimeoutsDescription = "Number of invocation timeouts"
-
- // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
- // conventions. It represents the distribution of max memory usage per
- // invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSMemUsageName = "faas.mem_usage"
- FaaSMemUsageUnit = "By"
- FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
-
- // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
- // conventions. It represents the distribution of CPU usage per invocation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSCPUUsageName = "faas.cpu_usage"
- FaaSCPUUsageUnit = "s"
- FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
-
- // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
- // conventions. It represents the distribution of net I/O usage per invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSNetIoName = "faas.net_io"
- FaaSNetIoUnit = "By"
- FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
-
- // HTTPServerRequestDuration is the metric conforming to the
- // "http.server.request.duration" semantic conventions. It represents the
- // duration of HTTP server requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPServerRequestDurationName = "http.server.request.duration"
- HTTPServerRequestDurationUnit = "s"
- HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
-
- // HTTPServerActiveRequests is the metric conforming to the
- // "http.server.active_requests" semantic conventions. It represents the number
- // of active HTTP server requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPServerActiveRequestsName = "http.server.active_requests"
- HTTPServerActiveRequestsUnit = "{request}"
- HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
-
- // HTTPServerRequestBodySize is the metric conforming to the
- // "http.server.request.body.size" semantic conventions. It represents the size
- // of HTTP server request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerRequestBodySizeName = "http.server.request.body.size"
- HTTPServerRequestBodySizeUnit = "By"
- HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
-
- // HTTPServerResponseBodySize is the metric conforming to the
- // "http.server.response.body.size" semantic conventions. It represents the
- // size of HTTP server response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerResponseBodySizeName = "http.server.response.body.size"
- HTTPServerResponseBodySizeUnit = "By"
- HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
-
- // HTTPClientRequestDuration is the metric conforming to the
- // "http.client.request.duration" semantic conventions. It represents the
- // duration of HTTP client requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPClientRequestDurationName = "http.client.request.duration"
- HTTPClientRequestDurationUnit = "s"
- HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
-
- // HTTPClientRequestBodySize is the metric conforming to the
- // "http.client.request.body.size" semantic conventions. It represents the size
- // of HTTP client request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientRequestBodySizeName = "http.client.request.body.size"
- HTTPClientRequestBodySizeUnit = "By"
- HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
-
- // HTTPClientResponseBodySize is the metric conforming to the
- // "http.client.response.body.size" semantic conventions. It represents the
- // size of HTTP client response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientResponseBodySizeName = "http.client.response.body.size"
- HTTPClientResponseBodySizeUnit = "By"
- HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
-
- // HTTPClientOpenConnections is the metric conforming to the
- // "http.client.open_connections" semantic conventions. It represents the
- // number of outbound HTTP connections that are currently active or idle on the
- // client.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- HTTPClientOpenConnectionsName = "http.client.open_connections"
- HTTPClientOpenConnectionsUnit = "{connection}"
- HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
-
- // HTTPClientConnectionDuration is the metric conforming to the
- // "http.client.connection.duration" semantic conventions. It represents the
- // duration of the successfully established outbound HTTP connections.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- HTTPClientConnectionDurationName = "http.client.connection.duration"
- HTTPClientConnectionDurationUnit = "s"
- HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
-
- // HTTPClientActiveRequests is the metric conforming to the
- // "http.client.active_requests" semantic conventions. It represents the number
- // of active HTTP requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPClientActiveRequestsName = "http.client.active_requests"
- HTTPClientActiveRequestsUnit = "{request}"
- HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
-
- // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
- // conventions. It represents the measure of initial memory requested.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmMemoryInitName = "jvm.memory.init"
- JvmMemoryInitUnit = "By"
- JvmMemoryInitDescription = "Measure of initial memory requested."
-
- // JvmSystemCPUUtilization is the metric conforming to the
- // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
- // CPU utilization for the whole system as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
- JvmSystemCPUUtilizationUnit = "1"
- JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
-
- // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
- // semantic conventions. It represents the average CPU load of the whole system
- // for the last minute as reported by the JVM.
- // Instrument: gauge
- // Unit: {run_queue_item}
- // Stability: Experimental
- JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
- JvmSystemCPULoad1mUnit = "{run_queue_item}"
- JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
-
- // JvmBufferMemoryUsage is the metric conforming to the
- // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
- // memory used by buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
- JvmBufferMemoryUsageUnit = "By"
- JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
-
- // JvmBufferMemoryLimit is the metric conforming to the
- // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
- // total memory capacity of buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
- JvmBufferMemoryLimitUnit = "By"
- JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
-
- // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
- // conventions. It represents the number of buffers in the pool.
- // Instrument: updowncounter
- // Unit: {buffer}
- // Stability: Experimental
- JvmBufferCountName = "jvm.buffer.count"
- JvmBufferCountUnit = "{buffer}"
- JvmBufferCountDescription = "Number of buffers in the pool."
-
- // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
- // conventions. It represents the measure of memory used.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedName = "jvm.memory.used"
- JvmMemoryUsedUnit = "By"
- JvmMemoryUsedDescription = "Measure of memory used."
-
- // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
- // semantic conventions. It represents the measure of memory committed.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryCommittedName = "jvm.memory.committed"
- JvmMemoryCommittedUnit = "By"
- JvmMemoryCommittedDescription = "Measure of memory committed."
-
- // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
- // conventions. It represents the measure of max obtainable memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryLimitName = "jvm.memory.limit"
- JvmMemoryLimitUnit = "By"
- JvmMemoryLimitDescription = "Measure of max obtainable memory."
-
- // JvmMemoryUsedAfterLastGc is the metric conforming to the
- // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
- // measure of memory used, as measured after the most recent garbage collection
- // event on this pool.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
- JvmMemoryUsedAfterLastGcUnit = "By"
- JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
-
- // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
- // conventions. It represents the duration of JVM garbage collection actions.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- JvmGcDurationName = "jvm.gc.duration"
- JvmGcDurationUnit = "s"
- JvmGcDurationDescription = "Duration of JVM garbage collection actions."
-
- // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
- // conventions. It represents the number of executing platform threads.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Stable
- JvmThreadCountName = "jvm.thread.count"
- JvmThreadCountUnit = "{thread}"
- JvmThreadCountDescription = "Number of executing platform threads."
-
- // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
- // conventions. It represents the number of classes loaded since JVM start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassLoadedName = "jvm.class.loaded"
- JvmClassLoadedUnit = "{class}"
- JvmClassLoadedDescription = "Number of classes loaded since JVM start."
-
- // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
- // semantic conventions. It represents the number of classes unloaded since JVM
- // start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassUnloadedName = "jvm.class.unloaded"
- JvmClassUnloadedUnit = "{class}"
- JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
-
- // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
- // conventions. It represents the number of classes currently loaded.
- // Instrument: updowncounter
- // Unit: {class}
- // Stability: Stable
- JvmClassCountName = "jvm.class.count"
- JvmClassCountUnit = "{class}"
- JvmClassCountDescription = "Number of classes currently loaded."
-
- // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
- // conventions. It represents the number of processors available to the Java
- // virtual machine.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Stable
- JvmCPUCountName = "jvm.cpu.count"
- JvmCPUCountUnit = "{cpu}"
- JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
-
- // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
- // conventions. It represents the cPU time used by the process as reported by
- // the JVM.
- // Instrument: counter
- // Unit: s
- // Stability: Stable
- JvmCPUTimeName = "jvm.cpu.time"
- JvmCPUTimeUnit = "s"
- JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
-
- // JvmCPURecentUtilization is the metric conforming to the
- // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
- // CPU utilization for the process as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Stable
- JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
- JvmCPURecentUtilizationUnit = "1"
- JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
-
- // MessagingPublishDuration is the metric conforming to the
- // "messaging.publish.duration" semantic conventions. It represents the
- // measures the duration of publish operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingPublishDurationName = "messaging.publish.duration"
- MessagingPublishDurationUnit = "s"
- MessagingPublishDurationDescription = "Measures the duration of publish operation."
-
- // MessagingReceiveDuration is the metric conforming to the
- // "messaging.receive.duration" semantic conventions. It represents the
- // measures the duration of receive operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingReceiveDurationName = "messaging.receive.duration"
- MessagingReceiveDurationUnit = "s"
- MessagingReceiveDurationDescription = "Measures the duration of receive operation."
-
- // MessagingProcessDuration is the metric conforming to the
- // "messaging.process.duration" semantic conventions. It represents the
- // measures the duration of process operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingProcessDurationName = "messaging.process.duration"
- MessagingProcessDurationUnit = "s"
- MessagingProcessDurationDescription = "Measures the duration of process operation."
-
- // MessagingPublishMessages is the metric conforming to the
- // "messaging.publish.messages" semantic conventions. It represents the
- // measures the number of published messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingPublishMessagesName = "messaging.publish.messages"
- MessagingPublishMessagesUnit = "{message}"
- MessagingPublishMessagesDescription = "Measures the number of published messages."
-
- // MessagingReceiveMessages is the metric conforming to the
- // "messaging.receive.messages" semantic conventions. It represents the
- // measures the number of received messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingReceiveMessagesName = "messaging.receive.messages"
- MessagingReceiveMessagesUnit = "{message}"
- MessagingReceiveMessagesDescription = "Measures the number of received messages."
-
- // MessagingProcessMessages is the metric conforming to the
- // "messaging.process.messages" semantic conventions. It represents the
- // measures the number of processed messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingProcessMessagesName = "messaging.process.messages"
- MessagingProcessMessagesUnit = "{message}"
- MessagingProcessMessagesDescription = "Measures the number of processed messages."
-
- // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
- // conventions. It represents the total CPU seconds broken down by different
- // states.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- ProcessCPUTimeName = "process.cpu.time"
- ProcessCPUTimeUnit = "s"
- ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
-
- // ProcessCPUUtilization is the metric conforming to the
- // "process.cpu.utilization" semantic conventions. It represents the difference
- // in process.cpu.time since the last measurement, divided by the elapsed time
- // and number of CPUs available to the process.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- ProcessCPUUtilizationName = "process.cpu.utilization"
- ProcessCPUUtilizationUnit = "1"
- ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."
-
- // ProcessMemoryUsage is the metric conforming to the "process.memory.usage"
- // semantic conventions. It represents the amount of physical memory in use.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryUsageName = "process.memory.usage"
- ProcessMemoryUsageUnit = "By"
- ProcessMemoryUsageDescription = "The amount of physical memory in use."
-
- // ProcessMemoryVirtual is the metric conforming to the
- // "process.memory.virtual" semantic conventions. It represents the amount of
- // committed virtual memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryVirtualName = "process.memory.virtual"
- ProcessMemoryVirtualUnit = "By"
- ProcessMemoryVirtualDescription = "The amount of committed virtual memory."
-
- // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic
- // conventions. It represents the disk bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessDiskIoName = "process.disk.io"
- ProcessDiskIoUnit = "By"
- ProcessDiskIoDescription = "Disk bytes transferred."
-
- // ProcessNetworkIo is the metric conforming to the "process.network.io"
- // semantic conventions. It represents the network bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessNetworkIoName = "process.network.io"
- ProcessNetworkIoUnit = "By"
- ProcessNetworkIoDescription = "Network bytes transferred."
-
- // ProcessThreadCount is the metric conforming to the "process.thread.count"
- // semantic conventions. It represents the process threads count.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Experimental
- ProcessThreadCountName = "process.thread.count"
- ProcessThreadCountUnit = "{thread}"
- ProcessThreadCountDescription = "Process threads count."
-
- // ProcessOpenFileDescriptorCount is the metric conforming to the
- // "process.open_file_descriptor.count" semantic conventions. It represents the
- // number of file descriptors in use by the process.
- // Instrument: updowncounter
- // Unit: {count}
- // Stability: Experimental
- ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count"
- ProcessOpenFileDescriptorCountUnit = "{count}"
- ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process."
-
- // ProcessContextSwitches is the metric conforming to the
- // "process.context_switches" semantic conventions. It represents the number of
- // times the process has been context switched.
- // Instrument: counter
- // Unit: {count}
- // Stability: Experimental
- ProcessContextSwitchesName = "process.context_switches"
- ProcessContextSwitchesUnit = "{count}"
- ProcessContextSwitchesDescription = "Number of times the process has been context switched."
-
- // ProcessPagingFaults is the metric conforming to the "process.paging.faults"
- // semantic conventions. It represents the number of page faults the process
- // has made.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- ProcessPagingFaultsName = "process.paging.faults"
- ProcessPagingFaultsUnit = "{fault}"
- ProcessPagingFaultsDescription = "Number of page faults the process has made."
-
- // RPCServerDuration is the metric conforming to the "rpc.server.duration"
- // semantic conventions. It represents the measures the duration of inbound
- // RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCServerDurationName = "rpc.server.duration"
- RPCServerDurationUnit = "ms"
- RPCServerDurationDescription = "Measures the duration of inbound RPC."
-
- // RPCServerRequestSize is the metric conforming to the
- // "rpc.server.request.size" semantic conventions. It represents the measures
- // the size of RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerRequestSizeName = "rpc.server.request.size"
- RPCServerRequestSizeUnit = "By"
- RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCServerResponseSize is the metric conforming to the
- // "rpc.server.response.size" semantic conventions. It represents the measures
- // the size of RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerResponseSizeName = "rpc.server.response.size"
- RPCServerResponseSizeUnit = "By"
- RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCServerRequestsPerRPC is the metric conforming to the
- // "rpc.server.requests_per_rpc" semantic conventions. It represents the
- // measures the number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
- RPCServerRequestsPerRPCUnit = "{count}"
- RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCServerResponsesPerRPC is the metric conforming to the
- // "rpc.server.responses_per_rpc" semantic conventions. It represents the
- // measures the number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
- RPCServerResponsesPerRPCUnit = "{count}"
- RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // RPCClientDuration is the metric conforming to the "rpc.client.duration"
- // semantic conventions. It represents the measures the duration of outbound
- // RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCClientDurationName = "rpc.client.duration"
- RPCClientDurationUnit = "ms"
- RPCClientDurationDescription = "Measures the duration of outbound RPC."
-
- // RPCClientRequestSize is the metric conforming to the
- // "rpc.client.request.size" semantic conventions. It represents the measures
- // the size of RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientRequestSizeName = "rpc.client.request.size"
- RPCClientRequestSizeUnit = "By"
- RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCClientResponseSize is the metric conforming to the
- // "rpc.client.response.size" semantic conventions. It represents the measures
- // the size of RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientResponseSizeName = "rpc.client.response.size"
- RPCClientResponseSizeUnit = "By"
- RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCClientRequestsPerRPC is the metric conforming to the
- // "rpc.client.requests_per_rpc" semantic conventions. It represents the
- // measures the number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
- RPCClientRequestsPerRPCUnit = "{count}"
- RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCClientResponsesPerRPC is the metric conforming to the
- // "rpc.client.responses_per_rpc" semantic conventions. It represents the
- // measures the number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
- RPCClientResponsesPerRPCUnit = "{count}"
- RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
- // conventions. It represents the seconds each logical CPU spent on each mode.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemCPUTimeName = "system.cpu.time"
- SystemCPUTimeUnit = "s"
- SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
-
- // SystemCPUUtilization is the metric conforming to the
- // "system.cpu.utilization" semantic conventions. It represents the difference
- // in system.cpu.time since the last measurement, divided by the elapsed time
- // and number of logical CPUs.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- SystemCPUUtilizationName = "system.cpu.utilization"
- SystemCPUUtilizationUnit = "1"
- SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
-
- // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
- // semantic conventions. It represents the reports the current frequency of the
- // CPU in Hz.
- // Instrument: gauge
- // Unit: {Hz}
- // Stability: Experimental
- SystemCPUFrequencyName = "system.cpu.frequency"
- SystemCPUFrequencyUnit = "{Hz}"
- SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
-
- // SystemCPUPhysicalCount is the metric conforming to the
- // "system.cpu.physical.count" semantic conventions. It represents the reports
- // the number of actual physical processor cores on the hardware.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPUPhysicalCountName = "system.cpu.physical.count"
- SystemCPUPhysicalCountUnit = "{cpu}"
- SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
-
- // SystemCPULogicalCount is the metric conforming to the
- // "system.cpu.logical.count" semantic conventions. It represents the reports
- // the number of logical (virtual) processor cores created by the operating
- // system to manage multitasking.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPULogicalCountName = "system.cpu.logical.count"
- SystemCPULogicalCountUnit = "{cpu}"
- SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
-
- // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
- // semantic conventions. It represents the reports memory in use by state.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryUsageName = "system.memory.usage"
- SystemMemoryUsageUnit = "By"
- SystemMemoryUsageDescription = "Reports memory in use by state."
-
- // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
- // semantic conventions. It represents the total memory available in the
- // system.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryLimitName = "system.memory.limit"
- SystemMemoryLimitUnit = "By"
- SystemMemoryLimitDescription = "Total memory available in the system."
-
- // SystemMemoryShared is the metric conforming to the "system.memory.shared"
- // semantic conventions. It represents the shared memory used (mostly by
- // tmpfs).
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemorySharedName = "system.memory.shared"
- SystemMemorySharedUnit = "By"
- SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
-
- // SystemMemoryUtilization is the metric conforming to the
- // "system.memory.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemMemoryUtilizationName = "system.memory.utilization"
- SystemMemoryUtilizationUnit = "1"
-
- // SystemPagingUsage is the metric conforming to the "system.paging.usage"
- // semantic conventions. It represents the unix swap or windows pagefile usage.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemPagingUsageName = "system.paging.usage"
- SystemPagingUsageUnit = "By"
- SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
-
- // SystemPagingUtilization is the metric conforming to the
- // "system.paging.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingUtilizationName = "system.paging.utilization"
- SystemPagingUtilizationUnit = "1"
-
- // SystemPagingFaults is the metric conforming to the "system.paging.faults"
- // semantic conventions.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingFaultsName = "system.paging.faults"
- SystemPagingFaultsUnit = "{fault}"
-
- // SystemPagingOperations is the metric conforming to the
- // "system.paging.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingOperationsName = "system.paging.operations"
- SystemPagingOperationsUnit = "{operation}"
-
- // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskIoName = "system.disk.io"
- SystemDiskIoUnit = "By"
-
- // SystemDiskOperations is the metric conforming to the
- // "system.disk.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskOperationsName = "system.disk.operations"
- SystemDiskOperationsUnit = "{operation}"
-
- // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
- // semantic conventions. It represents the time disk spent activated.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskIoTimeName = "system.disk.io_time"
- SystemDiskIoTimeUnit = "s"
- SystemDiskIoTimeDescription = "Time disk spent activated"
-
- // SystemDiskOperationTime is the metric conforming to the
- // "system.disk.operation_time" semantic conventions. It represents the sum of
- // the time each operation took to complete.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskOperationTimeName = "system.disk.operation_time"
- SystemDiskOperationTimeUnit = "s"
- SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
-
- // SystemDiskMerged is the metric conforming to the "system.disk.merged"
- // semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskMergedName = "system.disk.merged"
- SystemDiskMergedUnit = "{operation}"
-
- // SystemFilesystemUsage is the metric conforming to the
- // "system.filesystem.usage" semantic conventions.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUsageName = "system.filesystem.usage"
- SystemFilesystemUsageUnit = "By"
-
- // SystemFilesystemUtilization is the metric conforming to the
- // "system.filesystem.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUtilizationName = "system.filesystem.utilization"
- SystemFilesystemUtilizationUnit = "1"
-
- // SystemNetworkDropped is the metric conforming to the
- // "system.network.dropped" semantic conventions. It represents the count of
- // packets that are dropped or discarded even though there was no error.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- SystemNetworkDroppedName = "system.network.dropped"
- SystemNetworkDroppedUnit = "{packet}"
- SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
-
- // SystemNetworkPackets is the metric conforming to the
- // "system.network.packets" semantic conventions.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkPacketsName = "system.network.packets"
- SystemNetworkPacketsUnit = "{packet}"
-
- // SystemNetworkErrors is the metric conforming to the "system.network.errors"
- // semantic conventions. It represents the count of network errors detected.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- SystemNetworkErrorsName = "system.network.errors"
- SystemNetworkErrorsUnit = "{error}"
- SystemNetworkErrorsDescription = "Count of network errors detected"
-
- // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkIoName = "system.network.io"
- SystemNetworkIoUnit = "By"
-
- // SystemNetworkConnections is the metric conforming to the
- // "system.network.connections" semantic conventions.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkConnectionsName = "system.network.connections"
- SystemNetworkConnectionsUnit = "{connection}"
-
- // SystemProcessCount is the metric conforming to the "system.process.count"
- // semantic conventions. It represents the total number of processes in each
- // state.
- // Instrument: updowncounter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessCountName = "system.process.count"
- SystemProcessCountUnit = "{process}"
- SystemProcessCountDescription = "Total number of processes in each state"
-
- // SystemProcessCreated is the metric conforming to the
- // "system.process.created" semantic conventions. It represents the total
- // number of processes created over uptime of the host.
- // Instrument: counter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessCreatedName = "system.process.created"
- SystemProcessCreatedUnit = "{process}"
- SystemProcessCreatedDescription = "Total number of processes created over uptime of the host"
-
- // SystemLinuxMemoryAvailable is the metric conforming to the
- // "system.linux.memory.available" semantic conventions. It represents an
- // estimate of how much memory is available for starting new applications,
- // without causing swapping.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemLinuxMemoryAvailableName = "system.linux.memory.available"
- SystemLinuxMemoryAvailableUnit = "By"
- SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
deleted file mode 100644
index 4c87c7adc..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
-const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/README.md
deleted file mode 100644
index bf578303f..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.7.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.7.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.7.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/doc.go
deleted file mode 100644
index 2b2a112e0..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconv implements OpenTelemetry semantic conventions.
-//
-// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the v1.7.0 version of the OpenTelemetry specification.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/exception.go
deleted file mode 100644
index 4e882afed..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/exception.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0"
-
-const (
- // ExceptionEventName is the name of the Span event representing an exception.
- ExceptionEventName = "exception"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go
deleted file mode 100644
index c60061334..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0"
-
-import (
- "net/http"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/semconv/internal"
- "go.opentelemetry.io/otel/trace"
-)
-
-// HTTP scheme attributes.
-var (
- HTTPSchemeHTTP = HTTPSchemeKey.String("http")
- HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
-)
-
-var sc = &internal.SemanticConventions{
- EnduserIDKey: EnduserIDKey,
- HTTPClientIPKey: HTTPClientIPKey,
- HTTPFlavorKey: HTTPFlavorKey,
- HTTPHostKey: HTTPHostKey,
- HTTPMethodKey: HTTPMethodKey,
- HTTPRequestContentLengthKey: HTTPRequestContentLengthKey,
- HTTPRouteKey: HTTPRouteKey,
- HTTPSchemeHTTP: HTTPSchemeHTTP,
- HTTPSchemeHTTPS: HTTPSchemeHTTPS,
- HTTPServerNameKey: HTTPServerNameKey,
- HTTPStatusCodeKey: HTTPStatusCodeKey,
- HTTPTargetKey: HTTPTargetKey,
- HTTPURLKey: HTTPURLKey,
- HTTPUserAgentKey: HTTPUserAgentKey,
- NetHostIPKey: NetHostIPKey,
- NetHostNameKey: NetHostNameKey,
- NetHostPortKey: NetHostPortKey,
- NetPeerIPKey: NetPeerIPKey,
- NetPeerNameKey: NetPeerNameKey,
- NetPeerPortKey: NetPeerPortKey,
- NetTransportIP: NetTransportIP,
- NetTransportOther: NetTransportOther,
- NetTransportTCP: NetTransportTCP,
- NetTransportUDP: NetTransportUDP,
- NetTransportUnix: NetTransportUnix,
-}
-
-// NetAttributesFromHTTPRequest generates attributes of the net
-// namespace as specified by the OpenTelemetry specification for a
-// span. The network parameter is a string that net.Dial function
-// from standard library can understand.
-func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
- return sc.NetAttributesFromHTTPRequest(network, request)
-}
-
-// EndUserAttributesFromHTTPRequest generates attributes of the
-// enduser namespace as specified by the OpenTelemetry specification
-// for a span.
-func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
- return sc.EndUserAttributesFromHTTPRequest(request)
-}
-
-// HTTPClientAttributesFromHTTPRequest generates attributes of the
-// http namespace as specified by the OpenTelemetry specification for
-// a span on the client side.
-func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
- return sc.HTTPClientAttributesFromHTTPRequest(request)
-}
-
-// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes
-// to be used with server-side HTTP metrics.
-func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue {
- return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request)
-}
-
-// HTTPServerAttributesFromHTTPRequest generates attributes of the
-// http namespace as specified by the OpenTelemetry specification for
-// a span on the server side. Currently, only basic authentication is
-// supported.
-func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
- return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request)
-}
-
-// HTTPAttributesFromHTTPStatusCode generates attributes of the http
-// namespace as specified by the OpenTelemetry specification for a
-// span.
-func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
- return sc.HTTPAttributesFromHTTPStatusCode(code)
-}
-
-// SpanStatusFromHTTPStatusCode generates a status code and a message
-// as specified by the OpenTelemetry specification for a span.
-func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
- return internal.SpanStatusFromHTTPStatusCode(code)
-}
-
-// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message
-// as specified by the OpenTelemetry specification for a span.
-// Exclude 4xx for SERVER to set the appropriate status.
-func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) {
- return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/resource.go
deleted file mode 100644
index 4de145246..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/resource.go
+++ /dev/null
@@ -1,935 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// A cloud environment (e.g. GCP, Azure, AWS)
-const (
- // Name of the cloud provider.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- CloudProviderKey = attribute.Key("cloud.provider")
- // The cloud account ID the resource is assigned to.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
- // The geographical region the resource is running. Refer to your provider's docs
- // to see the available regions, for example [Alibaba Cloud
- // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
- // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
- // [Azure regions](https://azure.microsoft.com/en-us/global-
- // infrastructure/geographies/), or [Google Cloud
- // regions](https://cloud.google.com/about/locations).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'us-central1', 'us-east-1'
- CloudRegionKey = attribute.Key("cloud.region")
- // Cloud regions often have multiple, isolated locations known as zones to
- // increase availability. Availability zone represents the zone where the resource
- // is running.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
- // The cloud platform in use.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
-)
-
-// Resources used by AWS Elastic Container Service (ECS).
-const (
- // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.
- // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-
- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
- // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo
- // perguide/clusters.html).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
- // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l
- // aunch_types.html) for an ECS task.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
- // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates
- // t/developerguide/task_definitions.html).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-
- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
- // The task definition family this task definition is a member of.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
- // The revision for this task definition.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// Resources used by AWS Elastic Kubernetes Service (EKS).
-const (
- // The ARN of an EKS cluster.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// Resources specific to Amazon Web Services.
-const (
- // The name(s) of the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like multi-container
- // applications, where a single application has sidecar containers, and each write
- // to their own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
- // The Amazon Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
- // access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
- // The name(s) of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
- // The ARN(s) of the AWS log stream(s).
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-
- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain
- // several log streams, so these ARNs necessarily identify both a log group and a
- // log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-)
-
-// A container instance.
-const (
- // Container name.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
- // Container ID. Usually a UUID, as for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-
- // identification). The UUID might be abbreviated.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
- // The container runtime managing this container.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
- // Name of the image the container was built on.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
- // Container image tag.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '0.1'
- ContainerImageTagKey = attribute.Key("container.image.tag")
-)
-
-// The software deployment.
-const (
- // Name of the [deployment
- // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'staging', 'production'
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// The device on which the process represented by this resource is running.
-const (
- // A unique identifier representing the device
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values outlined
- // below. This value is not an advertising identifier and MUST NOT be used as
- // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id
- // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden
- // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the
- // Firebase Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on best
- // practices and exact implementation details. Caution should be taken when
- // storing personal data or anything which can identify a user. GDPR and data
- // protection laws may apply, ensure you do your own due diligence.
- DeviceIDKey = attribute.Key("device.id")
- // The model identifier for the device
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine readable version of the
- // model identifier rather than the market or consumer-friendly name of the
- // device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
- // The marketing name for the device model
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human readable version of the
- // device model rather than a machine readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
-)
-
-// A serverless instance.
-const (
- // The name of the single function that this runtime instance executes.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'my-function'
- // Note: This is the name of the function as configured/deployed on the FaaS
- // platform and is usually different from the name of the callback function (which
- // may be stored in the
- // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-
- // general.md#source-code-attributes) span attributes).
- FaaSNameKey = attribute.Key("faas.name")
- // The unique ID of the single function that this runtime instance executes.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
- // Note: Depending on the cloud provider, use:
-
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-
- // namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
- // aliases.html) with the resolved function version, as the same runtime instance
- // may be invokable with multiple
- // different aliases.
- // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-
- // resource-names)
- // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-
- // us/rest/api/resources/resources/get-by-id).
-
- // On some providers, it may not be possible to determine the full ID at startup,
- // which is why this field cannot be made required. For example, on AWS the
- // account ID
- // part of the ARN is not available without calling another AWS API
- // which may be deemed too slow for a short-running lambda function.
- // As an alternative, consider setting `faas.id` as a span attribute instead.
- FaaSIDKey = attribute.Key("faas.id")
- // The immutable version of the function being executed.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
-
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
- // versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run:** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-
- // var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
- // The execution environment ID as a string, that will be potentially reused for
- // other invocations to the same function/function version.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
- // The amount of memory available to the serverless function in MiB.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 128
- // Note: It's recommended to set this attribute since e.g. too little memory can
- // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
- // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
- // information.
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-)
-
-// A host is defined as a general computing instance.
-const (
- // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud
- // provider.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-test'
- HostIDKey = attribute.Key("host.id")
- // Name of the host. On Unix systems, it may contain what the hostname command
- // returns, or the fully qualified hostname, or another name specified by the
- // user.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
- // Type of host. For Cloud, this must be the machine type.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
- // The CPU architecture the host system is running on.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- HostArchKey = attribute.Key("host.arch")
- // Name of the VM image or OS install the host was instantiated from.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
- // VM image ID. For Cloud, this value is from the provider.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
- // The version string of the VM image as defined in [Version
- // Attributes](README.md#version-attributes).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
-
-// A Kubernetes Cluster.
-const (
- // The name of the cluster.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-)
-
-// A Kubernetes Node object.
-const (
- // The name of the Node.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
- // The UID of the Node.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-)
-
-// A Kubernetes Namespace.
-const (
- // The name of the namespace that the pod is running in.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-)
-
-// A Kubernetes Pod object.
-const (
- // The UID of the Pod.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
- // The name of the Pod.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-)
-
-// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
-const (
- // The name of the Container in a Pod template.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
-)
-
-// A Kubernetes ReplicaSet object.
-const (
- // The UID of the ReplicaSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
- // The name of the ReplicaSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-)
-
-// A Kubernetes Deployment object.
-const (
- // The UID of the Deployment.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
- // The name of the Deployment.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-)
-
-// A Kubernetes StatefulSet object.
-const (
- // The UID of the StatefulSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
- // The name of the StatefulSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-)
-
-// A Kubernetes DaemonSet object.
-const (
- // The UID of the DaemonSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
- // The name of the DaemonSet.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-)
-
-// A Kubernetes Job object.
-const (
- // The UID of the Job.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
- // The name of the Job.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-)
-
-// A Kubernetes CronJob object.
-const (
- // The UID of the CronJob.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
- // The name of the CronJob.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-)
-
-// The operating system (OS) on which the process represented by this resource is running.
-const (
- // The operating system type.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- OSTypeKey = attribute.Key("os.type")
- // Human readable (not intended to be parsed) OS version information, like e.g.
- // reported by `ver` or `lsb_release -a` commands.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS'
- OSDescriptionKey = attribute.Key("os.description")
- // Human readable operating system name.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
- // The version string of the operating system as defined in [Version
- // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// An operating system process.
-const (
- // Process identifier (PID).
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
- // The name of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
- // The full path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
- // The command used to launch the process (i.e. the command name). On Linux based
- // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows,
- // can be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
- // The full command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not
- // set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // Required: See below
- // Stability: stable
- // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
- // All the command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited strings
- // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be
- // the full argv vector passed to `main`.
- //
- // Type: string[]
- // Required: See below
- // Stability: stable
- // Examples: 'cmd/otecol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
- // The username of the user that owns the process.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-)
-
-// The single (language) runtime instance which is monitored.
-const (
- // The name of the runtime of this process. For compiled native binaries, this
- // SHOULD be the name of the compiler.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
- // The version of the runtime of this process, as returned by the runtime without
- // modification.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
- // An additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-)
-
-// A service instance.
-const (
- // Logical name of the service.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled services. If
- // the value was not specified, SDKs MUST fallback to `unknown_service:`
- // concatenated with [`process.executable.name`](process.md#process), e.g.
- // `unknown_service:bash`. If `process.executable.name` is not available, the
- // value MUST be set to `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
- // A namespace for `service.name`.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group of
- // services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name` is
- // expected to be unique for all services that have no explicit namespace defined
- // (so the empty/unspecified namespace is simply one more valid namespace). Zero-
- // length namespace string is assumed equal to unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
- // The string ID of the service instance.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words
- // `service.namespace,service.name,service.instance.id` triplet MUST be globally
- // unique). The ID helps to distinguish instances of the same service that exist
- // at the same time (e.g. instances of a horizontally scaled service). It is
- // preferable for the ID to be persistent and stay the same for the lifetime of
- // the service instance, however it is acceptable that the ID is ephemeral and
- // changes during important lifetime events for the service (e.g. service
- // restarts). If the service has no inherent unique ID that can be used as the
- // value of this attribute it is recommended to generate a random Version 1 or
- // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use
- // Version 5, see RFC 4122 for more recommendations).
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
- // The version string of the service API or implementation.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '2.0.0'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// The telemetry SDK used to capture data recorded by the instrumentation libraries.
-const (
- // The name of the telemetry SDK as defined above.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'opentelemetry'
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
- // The language of the telemetry SDK.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
- // The version string of the telemetry SDK.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
- // The version string of the auto instrumentation agent, if used.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '1.2.3'
- TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
-)
-
-// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime.
-const (
- // The name of the web engine.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
- // The version of the web engine.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
- // Additional description of the web engine (e.g. detailed version and edition
- // information).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go
deleted file mode 100644
index c21e6ae1a..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
-const SchemaURL = "https://opentelemetry.io/schemas/1.7.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/trace.go
deleted file mode 100644
index 3c5a4df9f..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/trace.go
+++ /dev/null
@@ -1,1547 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Span attributes used by AWS Lambda (in addition to general `faas` attributes).
-const (
- // The full invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next`
- // applicable).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `faas.id` if an alias is involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// This document defines the attributes used to perform database client calls.
-const (
- // An identifier for the database management system (DBMS) product being used. See
- // below for a list of well-known identifiers.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- DBSystemKey = attribute.Key("db.system")
- // The connection string used to connect to the database. It is recommended to
- // remove embedded credentials.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
- DBConnectionStringKey = attribute.Key("db.connection_string")
- // Username for accessing the database.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'readonly_user', 'reporting_user'
- DBUserKey = attribute.Key("db.user")
- // The fully-qualified class name of the [Java Database Connectivity
- // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
- // used to connect.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'org.postgresql.Driver',
- // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
- DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
- // If no [tech-specific attribute](#call-level-attributes-for-specific-
- // technologies) is defined, this attribute is used to report the name of the
- // database being accessed. For commands that switch the database, this should be
- // set to the target database (even if the command fails).
- //
- // Type: string
- // Required: Required, if applicable and no more-specific attribute is defined.
- // Stability: stable
- // Examples: 'customers', 'main'
- // Note: In some SQL databases, the database name to be used is called "schema
- // name".
- DBNameKey = attribute.Key("db.name")
- // The database statement being executed.
- //
- // Type: string
- // Required: Required if applicable and not explicitly disabled via
- // instrumentation configuration.
- // Stability: stable
- // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
- // Note: The value may be sanitized to exclude sensitive information.
- DBStatementKey = attribute.Key("db.statement")
- // The name of the operation being executed, e.g. the [MongoDB command
- // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
- // such as `findAndModify`, or the SQL keyword.
- //
- // Type: string
- // Required: Required, if `db.statement` is not applicable.
- // Stability: stable
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: When setting this to an SQL keyword, it is not recommended to attempt any
- // client-side parsing of `db.statement` just to get this property, but it should
- // be set if the operation name is provided by the library being instrumented. If
- // the SQL statement has an ambiguous operation, or performs more than one
- // operation, this value may be omitted.
- DBOperationKey = attribute.Key("db.operation")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
-)
-
-// Connection-level attributes for Microsoft SQL Server
-const (
- // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-
- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
- // connecting to. This name is used to determine the port of a named instance.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'MSSQLSERVER'
- // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer
- // required (but still recommended if non-standard).
- DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
-)
-
-// Call-level attributes for Cassandra
-const (
- // The name of the keyspace being accessed. To be used instead of the generic
- // `db.name` attribute.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'mykeyspace'
- DBCassandraKeyspaceKey = attribute.Key("db.cassandra.keyspace")
- // The fetch size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
- // The consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-
- // oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
- // The name of the primary table that the operation is acting upon, including the
- // schema name (if applicable).
- //
- // Type: string
- // Required: Recommended if available.
- // Stability: stable
- // Examples: 'mytable'
- // Note: This mirrors the db.sql.table attribute but references cassandra rather
- // than sql. It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting upon an
- // anonymous table, or more than one table, this value MUST NOT be set.
- DBCassandraTableKey = attribute.Key("db.cassandra.table")
- // Whether or not the query is idempotent.
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
- // The number of times a query was speculatively executed. Not set or `0` if the
- // query was not executed speculatively.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
- // The ID of the coordinating node for a query.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
- // The data center of the coordinating node for a query.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// Call-level attributes for Apache HBase
-const (
- // The [HBase namespace](https://hbase.apache.org/book.html#_namespace) being
- // accessed. To be used instead of the generic `db.name` attribute.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'default'
- DBHBaseNamespaceKey = attribute.Key("db.hbase.namespace")
-)
-
-// Call-level attributes for Redis
-const (
- // The index of the database being accessed as used in the [`SELECT`
- // command](https://redis.io/commands/select), provided as an integer. To be used
- // instead of the generic `db.name` attribute.
- //
- // Type: int
- // Required: Required, if other than the default database (`0`).
- // Stability: stable
- // Examples: 0, 1, 15
- DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
-)
-
-// Call-level attributes for MongoDB
-const (
- // The collection being accessed within the database stated in `db.name`.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'customers', 'products'
- DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
-)
-
-// Call-level attrbiutes for SQL databases
-const (
- // The name of the primary table that the operation is acting upon, including the
- // schema name (if applicable).
- //
- // Type: string
- // Required: Recommended if available.
- // Stability: stable
- // Examples: 'public.users', 'customers'
- // Note: It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting upon an
- // anonymous table, or more than one table, this value MUST NOT be set.
- DBSQLTableKey = attribute.Key("db.sql.table")
-)
-
-// This document defines the attributes used to report a single exception associated with a span.
-const (
- // The type of the exception (its fully-qualified class name, if applicable). The
- // dynamic type of the exception should be preferred over the static type in
- // languages that support it.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
- // The exception message.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
- // A stacktrace as a string in the natural representation for the language
- // runtime. The representation is to be determined and documented by each language
- // SIG.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
- // SHOULD be set to true if the exception event is recorded at a point where it is
- // known that the exception is escaping the scope of the span.
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of a span,
- // if that span is ended while the exception is still logically "in flight".
- // This may be actually "in flight" in some languages (e.g. if the exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most languages.
-
- // It is usually not possible to determine at the point where an exception is
- // thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending the span,
- // as done in the [example above](#exception-end-example).
-
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-)
-
-// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans.
-const (
- // Type of the trigger on which the function is executed.
- //
- // Type: Enum
- // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations.
- // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing
- // invocations, if it is known to the client. This is, for example, not the case,
- // when the transport layer is abstracted in a FaaS client framework without
- // access to its configuration.
- // Stability: stable
- FaaSTriggerKey = attribute.Key("faas.trigger")
- // The execution ID of the current function execution.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSExecutionKey = attribute.Key("faas.execution")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write.
-const (
- // The name of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos
- // DB to the database name.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
- // Describes the type of the operation that was performed on the data.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
- // A string containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
- // in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
- // The document name/table subjected to the operation. For example, in Cloud
- // Storage or S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// Semantic Convention for FaaS scheduled to be executed regularly.
-const (
- // A string containing the function invocation time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
- // in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
- // A string containing the schedule period as [Cron Expression](https://docs.oracl
- // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-)
-
-// Contains additional attributes for incoming FaaS spans.
-const (
- // A boolean that is true if the serverless function is executed for the first
- // time (aka cold-start).
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-)
-
-// Contains additional attributes for outgoing FaaS spans.
-const (
- // The name of the invoked function.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
- // function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
- // The cloud provider of the invoked function.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked
- // function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
- // The cloud region of the invoked function.
- //
- // Type: string
- // Required: For some cloud providers, like AWS or GCP, the region in which a
- // function is hosted is essential to uniquely identify the function and also part
- // of its endpoint. Since it's part of the endpoint being called, the region is
- // always known to clients. In these cases, `faas.invoked_region` MUST be set
- // accordingly. If the region is unknown to the client or not required for
- // identifying the invoked function, setting `faas.invoked_region` is optional.
- // Stability: stable
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
- // function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
-)
-
-// These attributes may be used for any network related operation.
-const (
- // Transport protocol used. See note below.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- NetTransportKey = attribute.Key("net.transport")
- // Remote address of the peer (dotted decimal for IPv4 or
- // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6)
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '127.0.0.1'
- NetPeerIPKey = attribute.Key("net.peer.ip")
- // Remote port number.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 80, 8080, 443
- NetPeerPortKey = attribute.Key("net.peer.port")
- // Remote hostname or similar, see note below.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'example.com'
- NetPeerNameKey = attribute.Key("net.peer.name")
- // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '192.168.0.1'
- NetHostIPKey = attribute.Key("net.host.ip")
- // Like `net.peer.port` but for the host port.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 35555
- NetHostPortKey = attribute.Key("net.host.port")
- // Local hostname or similar, see note below.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'localhost'
- NetHostNameKey = attribute.Key("net.host.name")
- // The internet connection type currently being used by the host.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Examples: 'wifi'
- NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
- // This describes more details regarding the connection.type. It may be the type
- // of cell technology connection, but it could be used for describing details
- // about a wifi connection.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Examples: 'LTE'
- NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
- // The name of the mobile carrier.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'sprint'
- NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
- // The mobile carrier country code.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '310'
- NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
- // The mobile carrier network code.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '001'
- NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
- // The ISO 3166-1 alpha-2 2-character country code associated with the mobile
- // carrier network.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'DE'
- NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
-)
-
-var (
- // ip_tcp
- NetTransportTCP = NetTransportKey.String("ip_tcp")
- // ip_udp
- NetTransportUDP = NetTransportKey.String("ip_udp")
- // Another IP-based protocol
- NetTransportIP = NetTransportKey.String("ip")
- // Unix Domain socket. See below
- NetTransportUnix = NetTransportKey.String("unix")
- // Named or anonymous pipe. See note below
- NetTransportPipe = NetTransportKey.String("pipe")
- // In-process communication
- NetTransportInProc = NetTransportKey.String("inproc")
- // Something else (non IP-based)
- NetTransportOther = NetTransportKey.String("other")
-)
-
-var (
- // wifi
- NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
- // wired
- NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
- // cell
- NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
- // unavailable
- NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
- // unknown
- NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
-)
-
-var (
- // GPRS
- NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
- // EDGE
- NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
- // UMTS
- NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
- // CDMA
- NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
- // IDEN
- NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
- // EHRPD
- NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
- // GSM
- NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
-)
-
-// Operations that access some remote service.
-const (
- // The [`service.name`](../../resource/semantic_conventions/README.md#service) of
- // the remote service. SHOULD be equal to the actual `service.name` resource
- // attribute of the remote service if any.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// These attributes may be used for any operation with an authenticated and/or authorized enduser.
-const (
- // Username or client_id extracted from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the
- // inbound request from outside the system.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
- // Actual/assumed role the client is making the request under extracted from token
- // or application security context.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
- // Scopes or granted authorities the client currently possesses extracted from
- // token or application security context. The value would come from the scope
- // associated with an [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value
- // in a [SAML 2.0 Assertion](http://docs.oasis-
- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// These attributes may be used for any operation to store information about a thread that started a span.
-const (
- // Current "managed" thread ID (as opposed to OS thread ID).
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
- // Current thread name.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// These attributes allow to report this unit of code and therefore to provide more context about the span.
-const (
- // The method or function name, or equivalent (usually rightmost part of the code
- // unit's name).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
- // The "namespace" within which `code.function` is defined. Usually the qualified
- // class or module name, such that `code.namespace` + some separator +
- // `code.function` form a unique identifier for the code unit.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
- // The source code file name that identifies the code unit as uniquely as possible
- // (preferably an absolute file path).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
- // The line number in `code.filepath` best representing the operation. It SHOULD
- // point within the code unit named in `code.function`.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-)
-
-// This document defines semantic conventions for HTTP client and server Spans.
-const (
- // HTTP request method.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- HTTPMethodKey = attribute.Key("http.method")
- // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`.
- // Usually the fragment is not transmitted over HTTP, but if it is known, it
- // should be included nevertheless.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
- // Note: `http.url` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case the attribute's
- // value should be `https://www.example.com/`.
- HTTPURLKey = attribute.Key("http.url")
- // The full request target as passed in a HTTP request line or equivalent.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '/path/12314/?q=ddds#123'
- HTTPTargetKey = attribute.Key("http.target")
- // The value of the [HTTP host
- // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header
- // should also be reported, see note.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'www.example.org'
- // Note: When the header is present but empty the attribute SHOULD be set to the
- // empty string. Note that this is a valid situation that is expected in certain
- // cases, according the aforementioned [section of RFC
- // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not
- // set the attribute MUST NOT be set.
- HTTPHostKey = attribute.Key("http.host")
- // The URI scheme identifying the used protocol.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'http', 'https'
- HTTPSchemeKey = attribute.Key("http.scheme")
- // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // Required: If and only if one was received/sent.
- // Stability: stable
- // Examples: 200
- HTTPStatusCodeKey = attribute.Key("http.status_code")
- // Kind of HTTP protocol used.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP`
- // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed.
- HTTPFlavorKey = attribute.Key("http.flavor")
- // Value of the [HTTP User-
- // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the
- // client.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
- HTTPUserAgentKey = attribute.Key("http.user_agent")
- // The size of the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as the
- // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
- // requests using transport encoding, this should be the compressed size.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 3495
- HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
- // The size of the uncompressed request payload body after transport decoding. Not
- // set if transport encoding not used.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 5493
- HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed")
- // The size of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as the
- // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
- // requests using transport encoding, this should be the compressed size.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 3495
- HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
- // The size of the uncompressed response payload body after transport decoding.
- // Not set if transport encoding not used.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 5493
- HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed")
-)
-
-var (
- // HTTP 1.0
- HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
- // HTTP 1.1
- HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
- // HTTP 2
- HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
- // SPDY protocol
- HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
- // QUIC protocol
- HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
-)
-
-// Semantic Convention for HTTP Server
-const (
- // The primary server name of the matched virtual host. This should be obtained
- // via configuration. If no such configuration can be obtained, this attribute
- // MUST NOT be set ( `net.host.name` should be used instead).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'example.com'
- // Note: `http.url` is usually not readily available on the server side but would
- // have to be assembled in a cumbersome and sometimes lossy process from other
- // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus
- // preferred to supply the raw data that is available.
- HTTPServerNameKey = attribute.Key("http.server_name")
- // The matched route (path template).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '/users/:userID?'
- HTTPRouteKey = attribute.Key("http.route")
- // The IP address of the original client behind all proxies, if known (e.g. from
- // [X-Forwarded-For](https://developer.mozilla.org/en-
- // US/docs/Web/HTTP/Headers/X-Forwarded-For)).
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '83.164.160.102'
- // Note: This is not necessarily the same as `net.peer.ip`, which would
- // identify the network-level peer, which may be a proxy.
-
- // This attribute should be set when a source of information different
- // from the one used for `net.peer.ip`, is available even if that other
- // source just confirms the same value as `net.peer.ip`.
- // Rationale: For `net.peer.ip`, one typically does not know if it
- // comes from a proxy, reverse proxy, or the actual client. Setting
- // `http.client_ip` when it's the same as `net.peer.ip` means that
- // one is at least somewhat confident that the address is not that of
- // the closest proxy.
- HTTPClientIPKey = attribute.Key("http.client_ip")
-)
-
-// Attributes that exist for multiple DynamoDB request types.
-const (
- // The keys in the `RequestItems` object field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
- // The JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : {
- // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
- // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
- // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
- // "string", "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
- // The JSON-serialized value of the `ItemCollectionMetrics` response field.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
- // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
- // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
- // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
- // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
- //
- // Type: double
- // Required: No
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
- // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // Required: No
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
- // The value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
- // The value of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems,
- // ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
- // The value of the `Limit` request parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
- // The value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
- // The value of the `IndexName` request parameter.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
- // The value of the `Select` request parameter.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-)
-
-// DynamoDB.CreateTable
-const (
- // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request
- // field
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string",
- // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
- // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits":
- // number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
- // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request
- // field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes":
- // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
- // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
- // "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// DynamoDB.ListTables
-const (
- // The value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
- // The number of items in the `TableNames` response parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// DynamoDB.Query
-const (
- // The value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // Required: No
- // Stability: stable
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-)
-
-// DynamoDB.Scan
-const (
- // The value of the `Segment` request parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
- // The value of the `TotalSegments` request parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
- // The value of the `Count` response parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
- // The value of the `ScannedCount` response parameter.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-)
-
-// DynamoDB.UpdateTable
-const (
- // The JSON-serialized value of each item in the `AttributeDefinitions` request
- // field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
- // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates`
- // request field.
- //
- // Type: string[]
- // Required: No
- // Stability: stable
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
- // number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// This document defines the attributes used in messaging systems.
-const (
- // A string identifying the messaging system.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS'
- MessagingSystemKey = attribute.Key("messaging.system")
- // The message destination name. This might be equal to the span name but is
- // required nevertheless.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- MessagingDestinationKey = attribute.Key("messaging.destination")
- // The kind of message destination
- //
- // Type: Enum
- // Required: Required only if the message destination is either a `queue` or
- // `topic`.
- // Stability: stable
- MessagingDestinationKindKey = attribute.Key("messaging.destination_kind")
- // A boolean that is true if the message destination is temporary.
- //
- // Type: boolean
- // Required: If missing, it is assumed to be false.
- // Stability: stable
- MessagingTempDestinationKey = attribute.Key("messaging.temp_destination")
- // The name of the transport protocol.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'AMQP', 'MQTT'
- MessagingProtocolKey = attribute.Key("messaging.protocol")
- // The version of the transport protocol.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '0.9.1'
- MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version")
- // Connection string.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'tibjmsnaming://localhost:7222',
- // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue'
- MessagingURLKey = attribute.Key("messaging.url")
- // A value used by the messaging system as an identifier for the message,
- // represented as a string.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message_id")
- // The [conversation ID](#conversations) identifying the conversation to which the
- // message belongs, represented as a string. Sometimes called "Correlation ID".
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'MyConversationID'
- MessagingConversationIDKey = attribute.Key("messaging.conversation_id")
- // The (uncompressed) size of the message payload in bytes. Also use this
- // attribute if it is unknown whether the compressed or uncompressed payload size
- // is reported.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 2738
- MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes")
- // The compressed size of the message payload in bytes.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 2048
- MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes")
-)
-
-var (
- // A message sent to a queue
- MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
- // A message sent to a topic
- MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
-)
-
-// Semantic convention for a consumer of messages received from a messaging system
-const (
- // A string identifying the kind of message consumption as defined in the
- // [Operation names](#operation-names) section above. If the operation is "send",
- // this attribute MUST NOT be set, since the operation can be inferred from the
- // span kind in that case.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- MessagingOperationKey = attribute.Key("messaging.operation")
- // The identifier for the consumer receiving a message. For Kafka, set it to
- // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are
- // present, or only `messaging.kafka.consumer_group`. For brokers, such as
- // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
- // message.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'mygroup - client-6'
- MessagingConsumerIDKey = attribute.Key("messaging.consumer_id")
-)
-
-var (
- // receive
- MessagingOperationReceive = MessagingOperationKey.String("receive")
- // process
- MessagingOperationProcess = MessagingOperationKey.String("process")
-)
-
-// Attributes for RabbitMQ
-const (
- // RabbitMQ message routing key.
- //
- // Type: string
- // Required: Unless it is empty.
- // Stability: stable
- // Examples: 'myKey'
- MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key")
-)
-
-// Attributes for Apache Kafka
-const (
- // Message keys in Kafka are used for grouping alike messages to ensure they're
- // processed on the same partition. They differ from `messaging.message_id` in
- // that they're not unique. If the key is `null`, the attribute MUST NOT be set.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'myKey'
- // Note: If the key type is not string, it's string representation has to be
- // supplied for the attribute. If the key has no unambiguous, canonical string
- // form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key")
- // Name of the Kafka Consumer Group that is handling the message. Only applies to
- // consumers, not producers.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group")
- // Client ID for the Consumer or Producer that is handling the message.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'client-5'
- MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
- // Partition the message is sent to.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Examples: 2
- MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition")
- // A boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // Required: If missing, it is assumed to be false.
- // Stability: stable
- MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone")
-)
-
-// This document defines semantic conventions for remote procedure calls.
-const (
- // A string identifying the remoting system.
- //
- // Type: string
- // Required: Always
- // Stability: stable
- // Examples: 'grpc', 'java_rmi', 'wcf'
- RPCSystemKey = attribute.Key("rpc.system")
- // The full (logical) name of the service being called, including its package
- // name, if applicable.
- //
- // Type: string
- // Required: No, but recommended
- // Stability: stable
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing class.
- // The `code.namespace` attribute may be used to store the latter (despite the
- // attribute name, it may include a class name; e.g., class with method actually
- // executing the call on the server side, RPC client stub class on the client
- // side).
- RPCServiceKey = attribute.Key("rpc.service")
- // The name of the (logical) method being called, must be equal to the $method
- // part in the span name.
- //
- // Type: string
- // Required: No, but recommended
- // Stability: stable
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the latter
- // (e.g., method actually executing the call on the server side, RPC client stub
- // method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-)
-
-// Tech-specific attributes for gRPC.
-const (
- // The [numeric status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC
- // request.
- //
- // Type: Enum
- // Required: Always
- // Stability: stable
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
-const (
- // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC
- // 1.0 does not specify this, the value can be omitted.
- //
- // Type: string
- // Required: If missing, it is assumed to be "1.0".
- // Stability: stable
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
- // `id` property of request or response. Since protocol allows id to be int,
- // string, `null` or missing (for notifications), value is expected to be cast to
- // string for simplicity. Use empty string in case of `null` value. Omit entirely
- // if this is a notification.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // Required: If missing, response is assumed to be successful.
- // Stability: stable
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // Required: No
- // Stability: stable
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-)
-
-// RPC received/sent message.
-const (
- // Whether this is a received or sent message.
- //
- // Type: Enum
- // Required: No
- // Stability: stable
- MessageTypeKey = attribute.Key("message.type")
- // MUST be calculated as two different counters starting from `1` one for sent
- // messages and one for received message.
- //
- // Type: int
- // Required: No
- // Stability: stable
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- MessageIDKey = attribute.Key("message.id")
- // Compressed size of the message in bytes.
- //
- // Type: int
- // Required: No
- // Stability: stable
- MessageCompressedSizeKey = attribute.Key("message.compressed_size")
- // Uncompressed size of the message in bytes.
- //
- // Type: int
- // Required: No
- // Stability: stable
- MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
-)
-
-var (
- // sent
- MessageTypeSent = MessageTypeKey.String("SENT")
- // received
- MessageTypeReceived = MessageTypeKey.String("RECEIVED")
-)
diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go
deleted file mode 100644
index 6836c6547..000000000
--- a/vendor/go.opentelemetry.io/otel/trace.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otel // import "go.opentelemetry.io/otel"
-
-import (
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/trace"
-)
-
-// Tracer creates a named tracer that implements Tracer interface.
-// If the name is an empty string then provider uses default name.
-//
-// This is short for GetTracerProvider().Tracer(name, opts...)
-func Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
- return GetTracerProvider().Tracer(name, opts...)
-}
-
-// GetTracerProvider returns the registered global trace provider.
-// If none is registered then an instance of NoopTracerProvider is returned.
-//
-// Use the trace provider to create a named tracer. E.g.
-//
-// tracer := otel.GetTracerProvider().Tracer("example.com/foo")
-//
-// or
-//
-// tracer := otel.Tracer("example.com/foo")
-func GetTracerProvider() trace.TracerProvider {
- return global.TracerProvider()
-}
-
-// SetTracerProvider registers `tp` as the global trace provider.
-func SetTracerProvider(tp trace.TracerProvider) {
- global.SetTracerProvider(tp)
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md
deleted file mode 100644
index 58ccaba69..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Trace API
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace)
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
deleted file mode 100644
index 9c0b720a4..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import (
- "time"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// TracerConfig is a group of options for a Tracer.
-type TracerConfig struct {
- instrumentationVersion string
- // Schema URL of the telemetry emitted by the Tracer.
- schemaURL string
- attrs attribute.Set
-}
-
-// InstrumentationVersion returns the version of the library providing instrumentation.
-func (t *TracerConfig) InstrumentationVersion() string {
- return t.instrumentationVersion
-}
-
-// InstrumentationAttributes returns the attributes associated with the library
-// providing instrumentation.
-func (t *TracerConfig) InstrumentationAttributes() attribute.Set {
- return t.attrs
-}
-
-// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer.
-func (t *TracerConfig) SchemaURL() string {
- return t.schemaURL
-}
-
-// NewTracerConfig applies all the options to a returned TracerConfig.
-func NewTracerConfig(options ...TracerOption) TracerConfig {
- var config TracerConfig
- for _, option := range options {
- config = option.apply(config)
- }
- return config
-}
-
-// TracerOption applies an option to a TracerConfig.
-type TracerOption interface {
- apply(TracerConfig) TracerConfig
-}
-
-type tracerOptionFunc func(TracerConfig) TracerConfig
-
-func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig {
- return fn(cfg)
-}
-
-// SpanConfig is a group of options for a Span.
-type SpanConfig struct {
- attributes []attribute.KeyValue
- timestamp time.Time
- links []Link
- newRoot bool
- spanKind SpanKind
- stackTrace bool
-}
-
-// Attributes describe the associated qualities of a Span.
-func (cfg *SpanConfig) Attributes() []attribute.KeyValue {
- return cfg.attributes
-}
-
-// Timestamp is a time in a Span life-cycle.
-func (cfg *SpanConfig) Timestamp() time.Time {
- return cfg.timestamp
-}
-
-// StackTrace checks whether stack trace capturing is enabled.
-func (cfg *SpanConfig) StackTrace() bool {
- return cfg.stackTrace
-}
-
-// Links are the associations a Span has with other Spans.
-func (cfg *SpanConfig) Links() []Link {
- return cfg.links
-}
-
-// NewRoot identifies a Span as the root Span for a new trace. This is
-// commonly used when an existing trace crosses trust boundaries and the
-// remote parent span context should be ignored for security.
-func (cfg *SpanConfig) NewRoot() bool {
- return cfg.newRoot
-}
-
-// SpanKind is the role a Span has in a trace.
-func (cfg *SpanConfig) SpanKind() SpanKind {
- return cfg.spanKind
-}
-
-// NewSpanStartConfig applies all the options to a returned SpanConfig.
-// No validation is performed on the returned SpanConfig (e.g. no uniqueness
-// checking or bounding of data), it is left to the SDK to perform this
-// action.
-func NewSpanStartConfig(options ...SpanStartOption) SpanConfig {
- var c SpanConfig
- for _, option := range options {
- c = option.applySpanStart(c)
- }
- return c
-}
-
-// NewSpanEndConfig applies all the options to a returned SpanConfig.
-// No validation is performed on the returned SpanConfig (e.g. no uniqueness
-// checking or bounding of data), it is left to the SDK to perform this
-// action.
-func NewSpanEndConfig(options ...SpanEndOption) SpanConfig {
- var c SpanConfig
- for _, option := range options {
- c = option.applySpanEnd(c)
- }
- return c
-}
-
-// SpanStartOption applies an option to a SpanConfig. These options are applicable
-// only when the span is created.
-type SpanStartOption interface {
- applySpanStart(SpanConfig) SpanConfig
-}
-
-type spanOptionFunc func(SpanConfig) SpanConfig
-
-func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig {
- return fn(cfg)
-}
-
-// SpanEndOption applies an option to a SpanConfig. These options are
-// applicable only when the span is ended.
-type SpanEndOption interface {
- applySpanEnd(SpanConfig) SpanConfig
-}
-
-// EventConfig is a group of options for an Event.
-type EventConfig struct {
- attributes []attribute.KeyValue
- timestamp time.Time
- stackTrace bool
-}
-
-// Attributes describe the associated qualities of an Event.
-func (cfg *EventConfig) Attributes() []attribute.KeyValue {
- return cfg.attributes
-}
-
-// Timestamp is a time in an Event life-cycle.
-func (cfg *EventConfig) Timestamp() time.Time {
- return cfg.timestamp
-}
-
-// StackTrace checks whether stack trace capturing is enabled.
-func (cfg *EventConfig) StackTrace() bool {
- return cfg.stackTrace
-}
-
-// NewEventConfig applies all the EventOptions to a returned EventConfig. If no
-// timestamp option is passed, the returned EventConfig will have a Timestamp
-// set to the call time, otherwise no validation is performed on the returned
-// EventConfig.
-func NewEventConfig(options ...EventOption) EventConfig {
- var c EventConfig
- for _, option := range options {
- c = option.applyEvent(c)
- }
- if c.timestamp.IsZero() {
- c.timestamp = time.Now()
- }
- return c
-}
-
-// EventOption applies span event options to an EventConfig.
-type EventOption interface {
- applyEvent(EventConfig) EventConfig
-}
-
-// SpanOption are options that can be used at both the beginning and end of a span.
-type SpanOption interface {
- SpanStartOption
- SpanEndOption
-}
-
-// SpanStartEventOption are options that can be used at the start of a span, or with an event.
-type SpanStartEventOption interface {
- SpanStartOption
- EventOption
-}
-
-// SpanEndEventOption are options that can be used at the end of a span, or with an event.
-type SpanEndEventOption interface {
- SpanEndOption
- EventOption
-}
-
-type attributeOption []attribute.KeyValue
-
-func (o attributeOption) applySpan(c SpanConfig) SpanConfig {
- c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
- return c
-}
-func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
-func (o attributeOption) applyEvent(c EventConfig) EventConfig {
- c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
- return c
-}
-
-var _ SpanStartEventOption = attributeOption{}
-
-// WithAttributes adds the attributes related to a span life-cycle event.
-// These attributes are used to describe the work a Span represents when this
-// option is provided to a Span's start event. Otherwise, these
-// attributes provide additional information about the event being recorded
-// (e.g. error, state change, processing progress, system event).
-//
-// If multiple of these options are passed the attributes of each successive
-// option will extend the attributes instead of overwriting. There is no
-// guarantee of uniqueness in the resulting attributes.
-func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption {
- return attributeOption(attributes)
-}
-
-// SpanEventOption are options that can be used with an event or a span.
-type SpanEventOption interface {
- SpanOption
- EventOption
-}
-
-type timestampOption time.Time
-
-func (o timestampOption) applySpan(c SpanConfig) SpanConfig {
- c.timestamp = time.Time(o)
- return c
-}
-func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
-func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) }
-func (o timestampOption) applyEvent(c EventConfig) EventConfig {
- c.timestamp = time.Time(o)
- return c
-}
-
-var _ SpanEventOption = timestampOption{}
-
-// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g.
-// started, stopped, errored).
-func WithTimestamp(t time.Time) SpanEventOption {
- return timestampOption(t)
-}
-
-type stackTraceOption bool
-
-func (o stackTraceOption) applyEvent(c EventConfig) EventConfig {
- c.stackTrace = bool(o)
- return c
-}
-
-func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig {
- c.stackTrace = bool(o)
- return c
-}
-func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) }
-
-// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false).
-func WithStackTrace(b bool) SpanEndEventOption {
- return stackTraceOption(b)
-}
-
-// WithLinks adds links to a Span. The links are added to the existing Span
-// links, i.e. this does not overwrite. Links with invalid span context are ignored.
-func WithLinks(links ...Link) SpanStartOption {
- return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
- cfg.links = append(cfg.links, links...)
- return cfg
- })
-}
-
-// WithNewRoot specifies that the Span should be treated as a root Span. Any
-// existing parent span context will be ignored when defining the Span's trace
-// identifiers.
-func WithNewRoot() SpanStartOption {
- return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
- cfg.newRoot = true
- return cfg
- })
-}
-
-// WithSpanKind sets the SpanKind of a Span.
-func WithSpanKind(kind SpanKind) SpanStartOption {
- return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
- cfg.spanKind = kind
- return cfg
- })
-}
-
-// WithInstrumentationVersion sets the instrumentation version.
-func WithInstrumentationVersion(version string) TracerOption {
- return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
- cfg.instrumentationVersion = version
- return cfg
- })
-}
-
-// WithInstrumentationAttributes sets the instrumentation attributes.
-//
-// The passed attributes will be de-duplicated.
-func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
- return tracerOptionFunc(func(config TracerConfig) TracerConfig {
- config.attrs = attribute.NewSet(attr...)
- return config
- })
-}
-
-// WithSchemaURL sets the schema URL for the Tracer.
-func WithSchemaURL(schemaURL string) TracerOption {
- return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
- cfg.schemaURL = schemaURL
- return cfg
- })
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
deleted file mode 100644
index 8c45a7107..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/context.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import "context"
-
-type traceContextKeyType int
-
-const currentSpanKey traceContextKeyType = iota
-
-// ContextWithSpan returns a copy of parent with span set as the current Span.
-func ContextWithSpan(parent context.Context, span Span) context.Context {
- return context.WithValue(parent, currentSpanKey, span)
-}
-
-// ContextWithSpanContext returns a copy of parent with sc as the current
-// Span. The Span implementation that wraps sc is non-recording and performs
-// no operations other than to return sc as the SpanContext from the
-// SpanContext method.
-func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context {
- return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
-}
-
-// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
-// as a remote SpanContext and as the current Span. The Span implementation
-// that wraps rsc is non-recording and performs no operations other than to
-// return rsc as the SpanContext from the SpanContext method.
-func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context {
- return ContextWithSpanContext(parent, rsc.WithRemote(true))
-}
-
-// SpanFromContext returns the current Span from ctx.
-//
-// If no Span is currently set in ctx an implementation of a Span that
-// performs no operations is returned.
-func SpanFromContext(ctx context.Context) Span {
- if ctx == nil {
- return noopSpanInstance
- }
- if span, ok := ctx.Value(currentSpanKey).(Span); ok {
- return span
- }
- return noopSpanInstance
-}
-
-// SpanContextFromContext returns the current Span's SpanContext.
-func SpanContextFromContext(ctx context.Context) SpanContext {
- return SpanFromContext(ctx).SpanContext()
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
deleted file mode 100644
index cdbf41d6d..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/doc.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package trace provides an implementation of the tracing part of the
-OpenTelemetry API.
-
-To participate in distributed traces a Span needs to be created for the
-operation being performed as part of a traced workflow. In its simplest form:
-
- var tracer trace.Tracer
-
- func init() {
- tracer = otel.Tracer("instrumentation/package/name")
- }
-
- func operation(ctx context.Context) {
- var span trace.Span
- ctx, span = tracer.Start(ctx, "operation")
- defer span.End()
- // ...
- }
-
-A Tracer is unique to the instrumentation and is used to create Spans.
-Instrumentation should be designed to accept a TracerProvider from which it
-can create its own unique Tracer. Alternatively, the registered global
-TracerProvider from the go.opentelemetry.io/otel package can be used as
-a default.
-
- const (
- name = "instrumentation/package/name"
- version = "0.1.0"
- )
-
- type Instrumentation struct {
- tracer trace.Tracer
- }
-
- func NewInstrumentation(tp trace.TracerProvider) *Instrumentation {
- if tp == nil {
- tp = otel.TracerProvider()
- }
- return &Instrumentation{
- tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)),
- }
- }
-
- func operation(ctx context.Context, inst *Instrumentation) {
- var span trace.Span
- ctx, span = inst.tracer.Start(ctx, "operation")
- defer span.End()
- // ...
- }
-
-# API Implementations
-
-This package does not conform to the standard Go versioning policy; all of its
-interfaces may have methods added to them without a package major version bump.
-This non-standard API evolution could surprise an uninformed implementation
-author. They could unknowingly build their implementation in a way that would
-result in a runtime panic for their users that update to the new API.
-
-The API is designed to help inform an instrumentation author about this
-non-standard API evolution. It requires them to choose a default behavior for
-unimplemented interface methods. There are three behavior choices they can
-make:
-
- - Compilation failure
- - Panic
- - Default to another implementation
-
-All interfaces in this API embed a corresponding interface from
-[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default
-behavior of their implementations to be a compilation failure, signaling to
-their users they need to update to the latest version of that implementation,
-they need to embed the corresponding interface from
-[go.opentelemetry.io/otel/trace/embedded] in their implementation. For
-example,
-
- import "go.opentelemetry.io/otel/trace/embedded"
-
- type TracerProvider struct {
- embedded.TracerProvider
- // ...
- }
-
-If an author wants the default behavior of their implementations to panic, they
-can embed the API interface directly.
-
- import "go.opentelemetry.io/otel/trace"
-
- type TracerProvider struct {
- trace.TracerProvider
- // ...
- }
-
-This option is not recommended. It will lead to publishing packages that
-contain runtime panics when users update to newer versions of
-[go.opentelemetry.io/otel/trace], which may be done with a transitive
-dependency.
-
-Finally, an author can embed another implementation in theirs. The embedded
-implementation will be used for methods not defined by the author. For example,
-an author who wants to default to silently dropping the call can use
-[go.opentelemetry.io/otel/trace/noop]:
-
- import "go.opentelemetry.io/otel/trace/noop"
-
- type TracerProvider struct {
- noop.TracerProvider
- // ...
- }
-
-It is strongly recommended that authors only embed
-[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior.
-That implementation is the only one OpenTelemetry authors can guarantee will
-fully implement all the API interfaces when a user updates their API.
-*/
-package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md
deleted file mode 100644
index 7754a239e..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Trace Embedded
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
deleted file mode 100644
index 3e359a00b..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package embedded provides interfaces embedded within the [OpenTelemetry
-// trace API].
-//
-// Implementers of the [OpenTelemetry trace API] can embed the relevant type
-// from this package into their implementation directly. Doing so will result
-// in a compilation error for users when the [OpenTelemetry trace API] is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-//
-// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace
-package embedded // import "go.opentelemetry.io/otel/trace/embedded"
-
-// TracerProvider is embedded in
-// [go.opentelemetry.io/otel/trace.TracerProvider].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to
-// experience a compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type TracerProvider interface{ tracerProvider() }
-
-// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a
-// compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface
-// is extended (which is something that can happen without a major version bump
-// of the API package).
-type Tracer interface{ tracer() }
-
-// Span is embedded in [go.opentelemetry.io/otel/trace.Span].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a
-// compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Span interface{ span() }
diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
deleted file mode 100644
index c00221e7b..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-// nonRecordingSpan is a minimal implementation of a Span that wraps a
-// SpanContext. It performs no operations other than to return the wrapped
-// SpanContext.
-type nonRecordingSpan struct {
- noopSpan
-
- sc SpanContext
-}
-
-// SpanContext returns the wrapped SpanContext.
-func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc }
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
deleted file mode 100644
index ca20e9997..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace/embedded"
-)
-
-// NewNoopTracerProvider returns an implementation of TracerProvider that
-// performs no operations. The Tracer and Spans created from the returned
-// TracerProvider also perform no operations.
-//
-// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider]
-// instead.
-func NewNoopTracerProvider() TracerProvider {
- return noopTracerProvider{}
-}
-
-type noopTracerProvider struct{ embedded.TracerProvider }
-
-var _ TracerProvider = noopTracerProvider{}
-
-// Tracer returns noop implementation of Tracer.
-func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
- return noopTracer{}
-}
-
-// noopTracer is an implementation of Tracer that performs no operations.
-type noopTracer struct{ embedded.Tracer }
-
-var _ Tracer = noopTracer{}
-
-// Start carries forward a non-recording Span, if one is present in the context, otherwise it
-// creates a no-op Span.
-func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) {
- span := SpanFromContext(ctx)
- if _, ok := span.(nonRecordingSpan); !ok {
- // span is likely already a noopSpan, but let's be sure
- span = noopSpanInstance
- }
- return ContextWithSpan(ctx, span), span
-}
-
-// noopSpan is an implementation of Span that performs no operations.
-type noopSpan struct{ embedded.Span }
-
-var noopSpanInstance Span = noopSpan{}
-
-// SpanContext returns an empty span context.
-func (noopSpan) SpanContext() SpanContext { return SpanContext{} }
-
-// IsRecording always returns false.
-func (noopSpan) IsRecording() bool { return false }
-
-// SetStatus does nothing.
-func (noopSpan) SetStatus(codes.Code, string) {}
-
-// SetError does nothing.
-func (noopSpan) SetError(bool) {}
-
-// SetAttributes does nothing.
-func (noopSpan) SetAttributes(...attribute.KeyValue) {}
-
-// End does nothing.
-func (noopSpan) End(...SpanEndOption) {}
-
-// RecordError does nothing.
-func (noopSpan) RecordError(error, ...EventOption) {}
-
-// AddEvent does nothing.
-func (noopSpan) AddEvent(string, ...EventOption) {}
-
-// AddLink does nothing.
-func (noopSpan) AddLink(Link) {}
-
-// SetName does nothing.
-func (noopSpan) SetName(string) {}
-
-// TracerProvider returns a no-op TracerProvider.
-func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} }
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
deleted file mode 100644
index cd382c82a..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/noop/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Trace Noop
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop)
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
deleted file mode 100644
index 64a4f1b36..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package noop provides an implementation of the OpenTelemetry trace API that
-// produces no telemetry and minimizes used computation resources.
-//
-// Using this package to implement the OpenTelemetry trace API will effectively
-// disable OpenTelemetry.
-//
-// This implementation can be embedded in other implementations of the
-// OpenTelemetry trace API. Doing so will mean the implementation defaults to
-// no operation for methods it does not implement.
-package noop // import "go.opentelemetry.io/otel/trace/noop"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace"
- "go.opentelemetry.io/otel/trace/embedded"
-)
-
-var (
- // Compile-time check this implements the OpenTelemetry API.
-
- _ trace.TracerProvider = TracerProvider{}
- _ trace.Tracer = Tracer{}
- _ trace.Span = Span{}
-)
-
-// TracerProvider is an OpenTelemetry No-Op TracerProvider.
-type TracerProvider struct{ embedded.TracerProvider }
-
-// NewTracerProvider returns a TracerProvider that does not record any telemetry.
-func NewTracerProvider() TracerProvider {
- return TracerProvider{}
-}
-
-// Tracer returns an OpenTelemetry Tracer that does not record any telemetry.
-func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
- return Tracer{}
-}
-
-// Tracer is an OpenTelemetry No-Op Tracer.
-type Tracer struct{ embedded.Tracer }
-
-// Start creates a span. The created span will be set in a child context of ctx
-// and returned with the span.
-//
-// If ctx contains a span context, the returned span will also contain that
-// span context. If the span context in ctx is for a non-recording span, that
-// span instance will be returned directly.
-func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
- span := trace.SpanFromContext(ctx)
-
- // If the parent context contains a non-zero span context, that span
- // context needs to be returned as a non-recording span
- // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
- var zeroSC trace.SpanContext
- if sc := span.SpanContext(); !sc.Equal(zeroSC) {
- if !span.IsRecording() {
- // If the span is not recording return it directly.
- return ctx, span
- }
- // Otherwise, return the span context needs in a non-recording span.
- span = Span{sc: sc}
- } else {
- // No parent, return a No-Op span with an empty span context.
- span = noopSpanInstance
- }
- return trace.ContextWithSpan(ctx, span), span
-}
-
-var noopSpanInstance trace.Span = Span{}
-
-// Span is an OpenTelemetry No-Op Span.
-type Span struct {
- embedded.Span
-
- sc trace.SpanContext
-}
-
-// SpanContext returns an empty span context.
-func (s Span) SpanContext() trace.SpanContext { return s.sc }
-
-// IsRecording always returns false.
-func (Span) IsRecording() bool { return false }
-
-// SetStatus does nothing.
-func (Span) SetStatus(codes.Code, string) {}
-
-// SetAttributes does nothing.
-func (Span) SetAttributes(...attribute.KeyValue) {}
-
-// End does nothing.
-func (Span) End(...trace.SpanEndOption) {}
-
-// RecordError does nothing.
-func (Span) RecordError(error, ...trace.EventOption) {}
-
-// AddEvent does nothing.
-func (Span) AddEvent(string, ...trace.EventOption) {}
-
-// AddLink does nothing.
-func (Span) AddLink(trace.Link) {}
-
-// SetName does nothing.
-func (Span) SetName(string) {}
-
-// TracerProvider returns a No-Op TracerProvider.
-func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} }
diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go
deleted file mode 100644
index ef85cb70c..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/provider.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import "go.opentelemetry.io/otel/trace/embedded"
-
-// TracerProvider provides Tracers that are used by instrumentation code to
-// trace computational workflows.
-//
-// A TracerProvider is the collection destination of all Spans from Tracers it
-// provides, it represents a unique telemetry collection pipeline. How that
-// pipeline is defined, meaning how those Spans are collected, processed, and
-// where they are exported, depends on its implementation. Instrumentation
-// authors do not need to define this implementation, rather just use the
-// provided Tracers to instrument code.
-//
-// Commonly, instrumentation code will accept a TracerProvider implementation
-// at runtime from its users or it can simply use the globally registered one
-// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type TracerProvider interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.TracerProvider
-
- // Tracer returns a unique Tracer scoped to be used by instrumentation code
- // to trace computational workflows. The scope and identity of that
- // instrumentation code is uniquely defined by the name and options passed.
- //
- // The passed name needs to uniquely identify instrumentation code.
- // Therefore, it is recommended that name is the Go package name of the
- // library providing instrumentation (note: not the code being
- // instrumented). Instrumentation libraries can have multiple versions,
- // therefore, the WithInstrumentationVersion option should be used to
- // distinguish these different codebases. Additionally, instrumentation
- // libraries may sometimes use traces to communicate different domains of
- // workflow data (i.e. using spans to communicate workflow events only). If
- // this is the case, the WithScopeAttributes option should be used to
- // uniquely identify Tracers that handle the different domains of workflow
- // data.
- //
- // If the same name and options are passed multiple times, the same Tracer
- // will be returned (it is up to the implementation if this will be the
- // same underlying instance of that Tracer or not). It is not necessary to
- // call this multiple times with the same name and options to get an
- // up-to-date Tracer. All implementations will ensure any TracerProvider
- // configuration changes are propagated to all provided Tracers.
- //
- // If name is empty, then an implementation defined default name will be
- // used instead.
- //
- // This method is safe to call concurrently.
- Tracer(name string, options ...TracerOption) Tracer
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go
deleted file mode 100644
index d3aa476ee..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/span.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace/embedded"
-)
-
-// Span is the individual component of a trace. It represents a single named
-// and timed operation of a workflow that is traced. A Tracer is used to
-// create a Span and it is then up to the operation the Span represents to
-// properly end the Span when the operation itself ends.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Span interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Span
-
- // End completes the Span. The Span is considered complete and ready to be
- // delivered through the rest of the telemetry pipeline after this method
- // is called. Therefore, updates to the Span are not allowed after this
- // method has been called.
- End(options ...SpanEndOption)
-
- // AddEvent adds an event with the provided name and options.
- AddEvent(name string, options ...EventOption)
-
- // AddLink adds a link.
- // Adding links at span creation using WithLinks is preferred to calling AddLink
- // later, for contexts that are available during span creation, because head
- // sampling decisions can only consider information present during span creation.
- AddLink(link Link)
-
- // IsRecording returns the recording state of the Span. It will return
- // true if the Span is active and events can be recorded.
- IsRecording() bool
-
- // RecordError will record err as an exception span event for this span. An
- // additional call to SetStatus is required if the Status of the Span should
- // be set to Error, as this method does not change the Span status. If this
- // span is not being recorded or err is nil then this method does nothing.
- RecordError(err error, options ...EventOption)
-
- // SpanContext returns the SpanContext of the Span. The returned SpanContext
- // is usable even after the End method has been called for the Span.
- SpanContext() SpanContext
-
- // SetStatus sets the status of the Span in the form of a code and a
- // description, provided the status hasn't already been set to a higher
- // value before (OK > Error > Unset). The description is only included in a
- // status when the code is for an error.
- SetStatus(code codes.Code, description string)
-
- // SetName sets the Span name.
- SetName(name string)
-
- // SetAttributes sets kv as attributes of the Span. If a key from kv
- // already exists for an attribute of the Span it will be overwritten with
- // the value contained in kv.
- SetAttributes(kv ...attribute.KeyValue)
-
- // TracerProvider returns a TracerProvider that can be used to generate
- // additional Spans on the same telemetry pipeline as the current Span.
- TracerProvider() TracerProvider
-}
-
-// Link is the relationship between two Spans. The relationship can be within
-// the same Trace or across different Traces.
-//
-// For example, a Link is used in the following situations:
-//
-// 1. Batch Processing: A batch of operations may contain operations
-// associated with one or more traces/spans. Since there can only be one
-// parent SpanContext, a Link is used to keep reference to the
-// SpanContext of all operations in the batch.
-// 2. Public Endpoint: A SpanContext for an in incoming client request on a
-// public endpoint should be considered untrusted. In such a case, a new
-// trace with its own identity and sampling decision needs to be created,
-// but this new trace needs to be related to the original trace in some
-// form. A Link is used to keep reference to the original SpanContext and
-// track the relationship.
-type Link struct {
- // SpanContext of the linked Span.
- SpanContext SpanContext
-
- // Attributes describe the aspects of the link.
- Attributes []attribute.KeyValue
-}
-
-// LinkFromContext returns a link encapsulating the SpanContext in the provided
-// ctx.
-func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
- return Link{
- SpanContext: SpanContextFromContext(ctx),
- Attributes: attrs,
- }
-}
-
-// SpanKind is the role a Span plays in a Trace.
-type SpanKind int
-
-// As a convenience, these match the proto definition, see
-// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
-//
-// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
-// to coerce a span kind to a valid value.
-const (
- // SpanKindUnspecified is an unspecified SpanKind and is not a valid
- // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
- // if it is received.
- SpanKindUnspecified SpanKind = 0
- // SpanKindInternal is a SpanKind for a Span that represents an internal
- // operation within an application.
- SpanKindInternal SpanKind = 1
- // SpanKindServer is a SpanKind for a Span that represents the operation
- // of handling a request from a client.
- SpanKindServer SpanKind = 2
- // SpanKindClient is a SpanKind for a Span that represents the operation
- // of client making a request to a server.
- SpanKindClient SpanKind = 3
- // SpanKindProducer is a SpanKind for a Span that represents the operation
- // of a producer sending a message to a message broker. Unlike
- // SpanKindClient and SpanKindServer, there is often no direct
- // relationship between this kind of Span and a SpanKindConsumer kind. A
- // SpanKindProducer Span will end once the message is accepted by the
- // message broker which might not overlap with the processing of that
- // message.
- SpanKindProducer SpanKind = 4
- // SpanKindConsumer is a SpanKind for a Span that represents the operation
- // of a consumer receiving a message from a message broker. Like
- // SpanKindProducer Spans, there is often no direct relationship between
- // this Span and the Span that produced the message.
- SpanKindConsumer SpanKind = 5
-)
-
-// ValidateSpanKind returns a valid span kind value. This will coerce
-// invalid values into the default value, SpanKindInternal.
-func ValidateSpanKind(spanKind SpanKind) SpanKind {
- switch spanKind {
- case SpanKindInternal,
- SpanKindServer,
- SpanKindClient,
- SpanKindProducer,
- SpanKindConsumer:
- // valid
- return spanKind
- default:
- return SpanKindInternal
- }
-}
-
-// String returns the specified name of the SpanKind in lower-case.
-func (sk SpanKind) String() string {
- switch sk {
- case SpanKindInternal:
- return "internal"
- case SpanKindServer:
- return "server"
- case SpanKindClient:
- return "client"
- case SpanKindProducer:
- return "producer"
- case SpanKindConsumer:
- return "consumer"
- default:
- return "unspecified"
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
deleted file mode 100644
index d49adf671..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import (
- "bytes"
- "encoding/hex"
- "encoding/json"
-)
-
-const (
- // FlagsSampled is a bitmask with the sampled bit set. A SpanContext
- // with the sampling bit set means the span is sampled.
- FlagsSampled = TraceFlags(0x01)
-
- errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
-
- errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32"
- errNilTraceID errorConst = "trace-id can't be all zero"
-
- errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16"
- errNilSpanID errorConst = "span-id can't be all zero"
-)
-
-type errorConst string
-
-func (e errorConst) Error() string {
- return string(e)
-}
-
-// TraceID is a unique identity of a trace.
-// nolint:revive // revive complains about stutter of `trace.TraceID`.
-type TraceID [16]byte
-
-var (
- nilTraceID TraceID
- _ json.Marshaler = nilTraceID
-)
-
-// IsValid checks whether the trace TraceID is valid. A valid trace ID does
-// not consist of zeros only.
-func (t TraceID) IsValid() bool {
- return !bytes.Equal(t[:], nilTraceID[:])
-}
-
-// MarshalJSON implements a custom marshal function to encode TraceID
-// as a hex string.
-func (t TraceID) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.String())
-}
-
-// String returns the hex string representation form of a TraceID.
-func (t TraceID) String() string {
- return hex.EncodeToString(t[:])
-}
-
-// SpanID is a unique identity of a span in a trace.
-type SpanID [8]byte
-
-var (
- nilSpanID SpanID
- _ json.Marshaler = nilSpanID
-)
-
-// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
-// of zeros only.
-func (s SpanID) IsValid() bool {
- return !bytes.Equal(s[:], nilSpanID[:])
-}
-
-// MarshalJSON implements a custom marshal function to encode SpanID
-// as a hex string.
-func (s SpanID) MarshalJSON() ([]byte, error) {
- return json.Marshal(s.String())
-}
-
-// String returns the hex string representation form of a SpanID.
-func (s SpanID) String() string {
- return hex.EncodeToString(s[:])
-}
-
-// TraceIDFromHex returns a TraceID from a hex string if it is compliant with
-// the W3C trace-context specification. See more at
-// https://www.w3.org/TR/trace-context/#trace-id
-// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
-func TraceIDFromHex(h string) (TraceID, error) {
- t := TraceID{}
- if len(h) != 32 {
- return t, errInvalidTraceIDLength
- }
-
- if err := decodeHex(h, t[:]); err != nil {
- return t, err
- }
-
- if !t.IsValid() {
- return t, errNilTraceID
- }
- return t, nil
-}
-
-// SpanIDFromHex returns a SpanID from a hex string if it is compliant
-// with the w3c trace-context specification.
-// See more at https://www.w3.org/TR/trace-context/#parent-id
-func SpanIDFromHex(h string) (SpanID, error) {
- s := SpanID{}
- if len(h) != 16 {
- return s, errInvalidSpanIDLength
- }
-
- if err := decodeHex(h, s[:]); err != nil {
- return s, err
- }
-
- if !s.IsValid() {
- return s, errNilSpanID
- }
- return s, nil
-}
-
-func decodeHex(h string, b []byte) error {
- for _, r := range h {
- switch {
- case 'a' <= r && r <= 'f':
- continue
- case '0' <= r && r <= '9':
- continue
- default:
- return errInvalidHexID
- }
- }
-
- decoded, err := hex.DecodeString(h)
- if err != nil {
- return err
- }
-
- copy(b, decoded)
- return nil
-}
-
-// TraceFlags contains flags that can be set on a SpanContext.
-type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`.
-
-// IsSampled returns if the sampling bit is set in the TraceFlags.
-func (tf TraceFlags) IsSampled() bool {
- return tf&FlagsSampled == FlagsSampled
-}
-
-// WithSampled sets the sampling bit in a new copy of the TraceFlags.
-func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag.
- if sampled {
- return tf | FlagsSampled
- }
-
- return tf &^ FlagsSampled
-}
-
-// MarshalJSON implements a custom marshal function to encode TraceFlags
-// as a hex string.
-func (tf TraceFlags) MarshalJSON() ([]byte, error) {
- return json.Marshal(tf.String())
-}
-
-// String returns the hex string representation form of TraceFlags.
-func (tf TraceFlags) String() string {
- return hex.EncodeToString([]byte{byte(tf)}[:])
-}
-
-// SpanContextConfig contains mutable fields usable for constructing
-// an immutable SpanContext.
-type SpanContextConfig struct {
- TraceID TraceID
- SpanID SpanID
- TraceFlags TraceFlags
- TraceState TraceState
- Remote bool
-}
-
-// NewSpanContext constructs a SpanContext using values from the provided
-// SpanContextConfig.
-func NewSpanContext(config SpanContextConfig) SpanContext {
- return SpanContext{
- traceID: config.TraceID,
- spanID: config.SpanID,
- traceFlags: config.TraceFlags,
- traceState: config.TraceState,
- remote: config.Remote,
- }
-}
-
-// SpanContext contains identifying trace information about a Span.
-type SpanContext struct {
- traceID TraceID
- spanID SpanID
- traceFlags TraceFlags
- traceState TraceState
- remote bool
-}
-
-var _ json.Marshaler = SpanContext{}
-
-// IsValid returns if the SpanContext is valid. A valid span context has a
-// valid TraceID and SpanID.
-func (sc SpanContext) IsValid() bool {
- return sc.HasTraceID() && sc.HasSpanID()
-}
-
-// IsRemote indicates whether the SpanContext represents a remotely-created Span.
-func (sc SpanContext) IsRemote() bool {
- return sc.remote
-}
-
-// WithRemote returns a copy of sc with the Remote property set to remote.
-func (sc SpanContext) WithRemote(remote bool) SpanContext {
- return SpanContext{
- traceID: sc.traceID,
- spanID: sc.spanID,
- traceFlags: sc.traceFlags,
- traceState: sc.traceState,
- remote: remote,
- }
-}
-
-// TraceID returns the TraceID from the SpanContext.
-func (sc SpanContext) TraceID() TraceID {
- return sc.traceID
-}
-
-// HasTraceID checks if the SpanContext has a valid TraceID.
-func (sc SpanContext) HasTraceID() bool {
- return sc.traceID.IsValid()
-}
-
-// WithTraceID returns a new SpanContext with the TraceID replaced.
-func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext {
- return SpanContext{
- traceID: traceID,
- spanID: sc.spanID,
- traceFlags: sc.traceFlags,
- traceState: sc.traceState,
- remote: sc.remote,
- }
-}
-
-// SpanID returns the SpanID from the SpanContext.
-func (sc SpanContext) SpanID() SpanID {
- return sc.spanID
-}
-
-// HasSpanID checks if the SpanContext has a valid SpanID.
-func (sc SpanContext) HasSpanID() bool {
- return sc.spanID.IsValid()
-}
-
-// WithSpanID returns a new SpanContext with the SpanID replaced.
-func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext {
- return SpanContext{
- traceID: sc.traceID,
- spanID: spanID,
- traceFlags: sc.traceFlags,
- traceState: sc.traceState,
- remote: sc.remote,
- }
-}
-
-// TraceFlags returns the flags from the SpanContext.
-func (sc SpanContext) TraceFlags() TraceFlags {
- return sc.traceFlags
-}
-
-// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags.
-func (sc SpanContext) IsSampled() bool {
- return sc.traceFlags.IsSampled()
-}
-
-// WithTraceFlags returns a new SpanContext with the TraceFlags replaced.
-func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext {
- return SpanContext{
- traceID: sc.traceID,
- spanID: sc.spanID,
- traceFlags: flags,
- traceState: sc.traceState,
- remote: sc.remote,
- }
-}
-
-// TraceState returns the TraceState from the SpanContext.
-func (sc SpanContext) TraceState() TraceState {
- return sc.traceState
-}
-
-// WithTraceState returns a new SpanContext with the TraceState replaced.
-func (sc SpanContext) WithTraceState(state TraceState) SpanContext {
- return SpanContext{
- traceID: sc.traceID,
- spanID: sc.spanID,
- traceFlags: sc.traceFlags,
- traceState: state,
- remote: sc.remote,
- }
-}
-
-// Equal is a predicate that determines whether two SpanContext values are equal.
-func (sc SpanContext) Equal(other SpanContext) bool {
- return sc.traceID == other.traceID &&
- sc.spanID == other.spanID &&
- sc.traceFlags == other.traceFlags &&
- sc.traceState.String() == other.traceState.String() &&
- sc.remote == other.remote
-}
-
-// MarshalJSON implements a custom marshal function to encode a SpanContext.
-func (sc SpanContext) MarshalJSON() ([]byte, error) {
- return json.Marshal(SpanContextConfig{
- TraceID: sc.traceID,
- SpanID: sc.spanID,
- TraceFlags: sc.traceFlags,
- TraceState: sc.traceState,
- Remote: sc.remote,
- })
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go
deleted file mode 100644
index 77952d2a0..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/tracer.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/trace/embedded"
-)
-
-// Tracer is the creator of Spans.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Tracer interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Tracer
-
- // Start creates a span and a context.Context containing the newly-created span.
- //
- // If the context.Context provided in `ctx` contains a Span then the newly-created
- // Span will be a child of that span, otherwise it will be a root span. This behavior
- // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
- // newly-created Span to be a root span even if `ctx` contains a Span.
- //
- // When creating a Span it is recommended to provide all known span attributes using
- // the `WithAttributes()` SpanOption as samplers will only have access to the
- // attributes provided when a Span is created.
- //
- // Any Span that is created MUST also be ended. This is the responsibility of the user.
- // Implementations of this API may leak memory or other resources if Spans are not ended.
- Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
deleted file mode 100644
index dc5e34cad..000000000
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import (
- "encoding/json"
- "fmt"
- "strings"
-)
-
-const (
- maxListMembers = 32
-
- listDelimiters = ","
- memberDelimiter = "="
-
- errInvalidKey errorConst = "invalid tracestate key"
- errInvalidValue errorConst = "invalid tracestate value"
- errInvalidMember errorConst = "invalid tracestate list-member"
- errMemberNumber errorConst = "too many list-members in tracestate"
- errDuplicate errorConst = "duplicate list-member in tracestate"
-)
-
-type member struct {
- Key string
- Value string
-}
-
-// according to (chr = %x20 / (nblk-char = %x21-2B / %x2D-3C / %x3E-7E) )
-// means (chr = %x20-2B / %x2D-3C / %x3E-7E) .
-func checkValueChar(v byte) bool {
- return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
-}
-
-// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) .
-func checkValueLast(v byte) bool {
- return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
-}
-
-// based on the W3C Trace Context specification
-//
-// value = (0*255(chr)) nblk-chr
-// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
-// chr = %x20 / nblk-chr
-//
-// see https://www.w3.org/TR/trace-context-1/#value
-func checkValue(val string) bool {
- n := len(val)
- if n == 0 || n > 256 {
- return false
- }
- for i := 0; i < n-1; i++ {
- if !checkValueChar(val[i]) {
- return false
- }
- }
- return checkValueLast(val[n-1])
-}
-
-func checkKeyRemain(key string) bool {
- // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
- for _, v := range key {
- if isAlphaNum(byte(v)) {
- continue
- }
- switch v {
- case '_', '-', '*', '/':
- continue
- }
- return false
- }
- return true
-}
-
-// according to
-//
-// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
-// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
-//
-// param n is remain part length, should be 255 in simple-key or 13 in system-id.
-func checkKeyPart(key string, n int) bool {
- if len(key) == 0 {
- return false
- }
- first := key[0] // key's first char
- ret := len(key[1:]) <= n
- ret = ret && first >= 'a' && first <= 'z'
- return ret && checkKeyRemain(key[1:])
-}
-
-func isAlphaNum(c byte) bool {
- if c >= 'a' && c <= 'z' {
- return true
- }
- return c >= '0' && c <= '9'
-}
-
-// according to
-//
-// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
-//
-// param n is remain part length, should be 240 exactly.
-func checkKeyTenant(key string, n int) bool {
- if len(key) == 0 {
- return false
- }
- return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
-}
-
-// based on the W3C Trace Context specification
-//
-// key = simple-key / multi-tenant-key
-// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
-// multi-tenant-key = tenant-id "@" system-id
-// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
-// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
-// lcalpha = %x61-7A ; a-z
-//
-// see https://www.w3.org/TR/trace-context-1/#tracestate-header.
-func checkKey(key string) bool {
- tenant, system, ok := strings.Cut(key, "@")
- if !ok {
- return checkKeyPart(key, 255)
- }
- return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13)
-}
-
-func newMember(key, value string) (member, error) {
- if !checkKey(key) {
- return member{}, errInvalidKey
- }
- if !checkValue(value) {
- return member{}, errInvalidValue
- }
- return member{Key: key, Value: value}, nil
-}
-
-func parseMember(m string) (member, error) {
- key, val, ok := strings.Cut(m, memberDelimiter)
- if !ok {
- return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
- }
- key = strings.TrimLeft(key, " \t")
- val = strings.TrimRight(val, " \t")
- result, e := newMember(key, val)
- if e != nil {
- return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
- }
- return result, nil
-}
-
-// String encodes member into a string compliant with the W3C Trace Context
-// specification.
-func (m member) String() string {
- return m.Key + "=" + m.Value
-}
-
-// TraceState provides additional vendor-specific trace identification
-// information across different distributed tracing systems. It represents an
-// immutable list consisting of key/value pairs, each pair is referred to as a
-// list-member.
-//
-// TraceState conforms to the W3C Trace Context specification
-// (https://www.w3.org/TR/trace-context-1). All operations that create or copy
-// a TraceState do so by validating all input and will only produce TraceState
-// that conform to the specification. Specifically, this means that all
-// list-member's key/value pairs are valid, no duplicate list-members exist,
-// and the maximum number of list-members (32) is not exceeded.
-type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState`
- // list is the members in order.
- list []member
-}
-
-var _ json.Marshaler = TraceState{}
-
-// ParseTraceState attempts to decode a TraceState from the passed
-// string. It returns an error if the input is invalid according to the W3C
-// Trace Context specification.
-func ParseTraceState(ts string) (TraceState, error) {
- if ts == "" {
- return TraceState{}, nil
- }
-
- wrapErr := func(err error) error {
- return fmt.Errorf("failed to parse tracestate: %w", err)
- }
-
- var members []member
- found := make(map[string]struct{})
- for ts != "" {
- var memberStr string
- memberStr, ts, _ = strings.Cut(ts, listDelimiters)
- if len(memberStr) == 0 {
- continue
- }
-
- m, err := parseMember(memberStr)
- if err != nil {
- return TraceState{}, wrapErr(err)
- }
-
- if _, ok := found[m.Key]; ok {
- return TraceState{}, wrapErr(errDuplicate)
- }
- found[m.Key] = struct{}{}
-
- members = append(members, m)
- if n := len(members); n > maxListMembers {
- return TraceState{}, wrapErr(errMemberNumber)
- }
- }
-
- return TraceState{list: members}, nil
-}
-
-// MarshalJSON marshals the TraceState into JSON.
-func (ts TraceState) MarshalJSON() ([]byte, error) {
- return json.Marshal(ts.String())
-}
-
-// String encodes the TraceState into a string compliant with the W3C
-// Trace Context specification. The returned string will be invalid if the
-// TraceState contains any invalid members.
-func (ts TraceState) String() string {
- if len(ts.list) == 0 {
- return ""
- }
- var n int
- n += len(ts.list) // member delimiters: '='
- n += len(ts.list) - 1 // list delimiters: ','
- for _, mem := range ts.list {
- n += len(mem.Key)
- n += len(mem.Value)
- }
-
- var sb strings.Builder
- sb.Grow(n)
- _, _ = sb.WriteString(ts.list[0].Key)
- _ = sb.WriteByte('=')
- _, _ = sb.WriteString(ts.list[0].Value)
- for i := 1; i < len(ts.list); i++ {
- _ = sb.WriteByte(listDelimiters[0])
- _, _ = sb.WriteString(ts.list[i].Key)
- _ = sb.WriteByte('=')
- _, _ = sb.WriteString(ts.list[i].Value)
- }
- return sb.String()
-}
-
-// Get returns the value paired with key from the corresponding TraceState
-// list-member if it exists, otherwise an empty string is returned.
-func (ts TraceState) Get(key string) string {
- for _, member := range ts.list {
- if member.Key == key {
- return member.Value
- }
- }
-
- return ""
-}
-
-// Walk walks all key value pairs in the TraceState by calling f
-// Iteration stops if f returns false.
-func (ts TraceState) Walk(f func(key, value string) bool) {
- for _, m := range ts.list {
- if !f(m.Key, m.Value) {
- break
- }
- }
-}
-
-// Insert adds a new list-member defined by the key/value pair to the
-// TraceState. If a list-member already exists for the given key, that
-// list-member's value is updated. The new or updated list-member is always
-// moved to the beginning of the TraceState as specified by the W3C Trace
-// Context specification.
-//
-// If key or value are invalid according to the W3C Trace Context
-// specification an error is returned with the original TraceState.
-//
-// If adding a new list-member means the TraceState would have more members
-// then is allowed, the new list-member will be inserted and the right-most
-// list-member will be dropped in the returned TraceState.
-func (ts TraceState) Insert(key, value string) (TraceState, error) {
- m, err := newMember(key, value)
- if err != nil {
- return ts, err
- }
- n := len(ts.list)
- found := n
- for i := range ts.list {
- if ts.list[i].Key == key {
- found = i
- }
- }
- cTS := TraceState{}
- if found == n && n < maxListMembers {
- cTS.list = make([]member, n+1)
- } else {
- cTS.list = make([]member, n)
- }
- cTS.list[0] = m
- // When the number of members exceeds capacity, drop the "right-most".
- copy(cTS.list[1:], ts.list[0:found])
- if found < n {
- copy(cTS.list[1+found:], ts.list[found+1:])
- }
- return cTS, nil
-}
-
-// Delete returns a copy of the TraceState with the list-member identified by
-// key removed.
-func (ts TraceState) Delete(key string) TraceState {
- members := make([]member, ts.Len())
- copy(members, ts.list)
- for i, member := range ts.list {
- if member.Key == key {
- members = append(members[:i], members[i+1:]...)
- // TraceState should contain no duplicate members.
- break
- }
- }
- return TraceState{list: members}
-}
-
-// Len returns the number of list-members in the TraceState.
-func (ts TraceState) Len() int {
- return len(ts.list)
-}
diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh
deleted file mode 100644
index 1e87855ee..000000000
--- a/vendor/go.opentelemetry.io/otel/verify_readmes.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-# Copyright The OpenTelemetry Authors
-# SPDX-License-Identifier: Apache-2.0
-
-set -euo pipefail
-
-dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort)
-
-missingReadme=false
-for dir in $dirs; do
- if [ ! -f "$dir/README.md" ]; then
- echo "couldn't find README.md for $dir"
- missingReadme=true
- fi
-done
-
-if [ "$missingReadme" = true ] ; then
- echo "Error: some READMEs couldn't be found."
- exit 1
-fi
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
deleted file mode 100644
index c9b7cdbbf..000000000
--- a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-
-# Copyright The OpenTelemetry Authors
-# SPDX-License-Identifier: Apache-2.0
-
-set -euo pipefail
-
-TARGET="${1:?Must provide target ref}"
-
-FILE="CHANGELOG.md"
-TEMP_DIR=$(mktemp -d)
-echo "Temp folder: $TEMP_DIR"
-
-# Only the latest commit of the feature branch is available
-# automatically. To diff with the base branch, we need to
-# fetch that too (and we only need its latest commit).
-git fetch origin "${TARGET}" --depth=1
-
-# Checkout the previous version on the base branch of the changelog to tmpfolder
-git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
-
-PREVIOUS_FILE="$TEMP_DIR/$FILE"
-CURRENT_FILE="$FILE"
-PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
-CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
-
-# Extract released sections from the previous version
-awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
-
-# Extract released sections from the current version
-awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
-
-# Compare the released sections
-if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
- echo "Error: The released sections of the changelog file have been modified."
- diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
- rm -rf "$TEMP_DIR"
- false
-fi
-
-rm -rf "$TEMP_DIR"
-echo "The released sections remain unchanged."
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
deleted file mode 100644
index eb22002d8..000000000
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otel // import "go.opentelemetry.io/otel"
-
-// Version is the current release version of OpenTelemetry in use.
-func Version() string {
- return "1.34.0"
-}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
deleted file mode 100644
index ce4fe59b0..000000000
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright The OpenTelemetry Authors
-# SPDX-License-Identifier: Apache-2.0
-
-module-sets:
- stable-v1:
- version: v1.34.0
- modules:
- - go.opentelemetry.io/otel
- - go.opentelemetry.io/otel/bridge/opencensus
- - go.opentelemetry.io/otel/bridge/opencensus/test
- - go.opentelemetry.io/otel/bridge/opentracing
- - go.opentelemetry.io/otel/bridge/opentracing/test
- - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- - go.opentelemetry.io/otel/exporters/otlp/otlptrace
- - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
- - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
- - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
- - go.opentelemetry.io/otel/exporters/stdout/stdouttrace
- - go.opentelemetry.io/otel/exporters/zipkin
- - go.opentelemetry.io/otel/metric
- - go.opentelemetry.io/otel/sdk
- - go.opentelemetry.io/otel/sdk/metric
- - go.opentelemetry.io/otel/trace
- experimental-metrics:
- version: v0.56.0
- modules:
- - go.opentelemetry.io/otel/exporters/prometheus
- experimental-logs:
- version: v0.10.0
- modules:
- - go.opentelemetry.io/otel/log
- - go.opentelemetry.io/otel/sdk/log
- - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
- - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
- experimental-schema:
- version: v0.0.12
- modules:
- - go.opentelemetry.io/otel/schema
-excluded-modules:
- - go.opentelemetry.io/otel/internal/tools